Snap for 7156330 from 5eb491bfd249ca77c6de21c2895e1fd969ce4cdd to s-keystone-qcom-release

Change-Id: I1dc9e5d109d5ab95491510d2a91f4efbaf5ff3ca
diff --git a/Android.bp b/Android.bp
index 2575cf8..9220694 100644
--- a/Android.bp
+++ b/Android.bp
@@ -55,6 +55,7 @@
     "src/subgraph/depth-to-space.c",
     "src/subgraph/depthwise-convolution-2d.c",
     "src/subgraph/divide.c",
+    "src/subgraph/elu.c",
     "src/subgraph/floor.c",
     "src/subgraph/fully-connected.c",
     "src/subgraph/global-average-pooling-2d.c",
@@ -390,6 +391,18 @@
     "src/f32-vbinary/gen/vsubc-scalar-x2.c",
     "src/f32-vbinary/gen/vsubc-scalar-x4.c",
     "src/f32-vbinary/gen/vsubc-scalar-x8.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x1.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x2.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x3.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x5.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x6.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x1.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x2.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x3.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x5.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x6.c",
     "src/f32-vlrelu/gen/vlrelu-scalar-x1.c",
     "src/f32-vlrelu/gen/vlrelu-scalar-x2.c",
     "src/f32-vlrelu/gen/vlrelu-scalar-x4.c",
@@ -494,13 +507,6 @@
     "src/xx-copy/memcpy.c",
 ]
 
-PSIMD_ACCMATH_UKERNELS = [
-    "src/qs8-requantization/fp32-psimd.c",
-    "src/qs8-requantization/precise-psimd.c",
-    "src/qu8-requantization/fp32-psimd.c",
-    "src/qu8-requantization/precise-psimd.c",
-]
-
 // ISA-specific micro-kernels
 NEON_UKERNELS = [
     "src/f32-argmaxpool/4x-neon-c4.c",
@@ -518,6 +524,7 @@
     "src/f32-conv-hwc/gen/3x3s2p1c3x4-neon-2x2.c",
     "src/f32-conv-hwc/gen/3x3s2p1c3x8-neon-2x1.c",
     "src/f32-conv-hwc/gen/3x3s2p1c3x8-neon-2x2.c",
+    "src/f32-conv-hwc2chw/3x3s2p1c3x4-neon-2x2.c",
     "src/f32-dwconv/gen/up4x4-minmax-neon-acc2.c",
     "src/f32-dwconv/gen/up4x4-minmax-neon.c",
     "src/f32-dwconv/gen/up4x9-minmax-neon-acc2.c",
@@ -667,7 +674,6 @@
     "src/f32-relu/gen/neon-x4.c",
     "src/f32-relu/gen/neon-x8.c",
     "src/f32-rmax/neon.c",
-    "src/f32-sigmoid/gen/neon-frac-p9-p10-nr1recps-x16.c",
     "src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x4.c",
     "src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x8.c",
     "src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x12.c",
@@ -686,6 +692,19 @@
     "src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x16.c",
     "src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x20.c",
     "src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x24.c",
+    "src/f32-spmm/gen/4x1-minmax-neon-pipelined.c",
+    "src/f32-spmm/gen/4x1-minmax-neon-x2.c",
+    "src/f32-spmm/gen/4x1-minmax-neon.c",
+    "src/f32-spmm/gen/8x1-minmax-neon-pipelined.c",
+    "src/f32-spmm/gen/8x1-minmax-neon-x2.c",
+    "src/f32-spmm/gen/8x1-minmax-neon.c",
+    "src/f32-spmm/gen/12x1-minmax-neon.c",
+    "src/f32-spmm/gen/16x1-minmax-neon-pipelined.c",
+    "src/f32-spmm/gen/16x1-minmax-neon-x2.c",
+    "src/f32-spmm/gen/16x1-minmax-neon.c",
+    "src/f32-spmm/gen/32x1-minmax-neon-pipelined.c",
+    "src/f32-spmm/gen/32x1-minmax-neon-x2.c",
+    "src/f32-spmm/gen/32x1-minmax-neon.c",
     "src/f32-vbinary/gen/vadd-minmax-neon-x4.c",
     "src/f32-vbinary/gen/vadd-minmax-neon-x8.c",
     "src/f32-vbinary/gen/vaddc-minmax-neon-x4.c",
@@ -712,6 +731,18 @@
     "src/f32-vbinary/gen/vsub-minmax-neon-x8.c",
     "src/f32-vbinary/gen/vsubc-minmax-neon-x4.c",
     "src/f32-vbinary/gen/vsubc-minmax-neon-x8.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x12.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x20.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x24.c",
     "src/f32-vlrelu/gen/vlrelu-neon-x4.c",
     "src/f32-vlrelu/gen/vlrelu-neon-x8.c",
     "src/f32-vmulcaddc/gen/c4-minmax-neon-2x.c",
@@ -739,10 +770,6 @@
     "src/math/roundu-neon-cvt.c",
     "src/math/roundz-neon-addsub.c",
     "src/math/roundz-neon-cvt.c",
-    "src/math/sigmoid-neon-frac-p9-p10-nr1recps.c",
-    "src/math/sigmoid-neon-rr1-lut64-p2-nr2recps.c",
-    "src/math/sigmoid-neon-rr1-lut2048-p1-nr2recps.c",
-    "src/math/sigmoid-neon-rr1-p5-nr2recps.c",
     "src/math/sigmoid-neon-rr2-lut64-p2-nr2recps.c",
     "src/math/sigmoid-neon-rr2-lut2048-p1-nr2recps.c",
     "src/math/sigmoid-neon-rr2-p5-nr2recps.c",
@@ -932,6 +959,31 @@
     "src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x16.c",
     "src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x20.c",
     "src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x24.c",
+    "src/f32-spmm/gen/4x1-minmax-neonfma-pipelined.c",
+    "src/f32-spmm/gen/4x1-minmax-neonfma-x2.c",
+    "src/f32-spmm/gen/4x1-minmax-neonfma.c",
+    "src/f32-spmm/gen/8x1-minmax-neonfma-pipelined.c",
+    "src/f32-spmm/gen/8x1-minmax-neonfma-x2.c",
+    "src/f32-spmm/gen/8x1-minmax-neonfma.c",
+    "src/f32-spmm/gen/12x1-minmax-neonfma.c",
+    "src/f32-spmm/gen/16x1-minmax-neonfma-pipelined.c",
+    "src/f32-spmm/gen/16x1-minmax-neonfma-x2.c",
+    "src/f32-spmm/gen/16x1-minmax-neonfma.c",
+    "src/f32-spmm/gen/32x1-minmax-neonfma-pipelined.c",
+    "src/f32-spmm/gen/32x1-minmax-neonfma-x2.c",
+    "src/f32-spmm/gen/32x1-minmax-neonfma.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x4.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x8.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x12.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x16.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x20.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x24.c",
     "src/f32-vmulcaddc/gen/c4-minmax-neonfma-2x.c",
     "src/f32-vmulcaddc/gen/c8-minmax-neonfma-2x.c",
     "src/f32-vsqrt/gen/neonfma-nr1rsqrts1fma1adj-x4.c",
@@ -956,11 +1008,11 @@
     "src/f32-vsqrt/gen/neonfma-nr2fma1adj-x40.c",
     "src/math/exp-neonfma-rr2-lut64-p2.c",
     "src/math/exp-neonfma-rr2-p5.c",
+    "src/math/expm1minus-neonfma-rr1-lut16-p3.c",
+    "src/math/expm1minus-neonfma-rr1-p6.c",
     "src/math/expminus-neonfma-rr2-lut64-p2.c",
     "src/math/expminus-neonfma-rr2-lut2048-p1.c",
     "src/math/expminus-neonfma-rr2-p5.c",
-    "src/math/expm1minus-neonfma-rr1-lut16-p3.c",
-    "src/math/expm1minus-neonfma-rr1-p6.c",
     "src/math/sigmoid-neonfma-rr1-lut64-p2-nr1recps1fma.c",
     "src/math/sigmoid-neonfma-rr1-lut64-p2-nr2fma.c",
     "src/math/sigmoid-neonfma-rr1-lut64-p2-nr2recps.c",
@@ -1075,27 +1127,14 @@
     "src/f32-sigmoid/gen/neonfma-rr1-p5-div-x16.c",
     "src/f32-sigmoid/gen/neonfma-rr1-p5-div-x20.c",
     "src/f32-sigmoid/gen/neonfma-rr1-p5-div-x24.c",
-    "src/f32-spmm/gen/4x1-minmax-neonfma-pipelined.c",
-    "src/f32-spmm/gen/4x1-minmax-neonfma-x2.c",
-    "src/f32-spmm/gen/4x1-minmax-neonfma.c",
     "src/f32-spmm/gen/4x2-minmax-neonfma.c",
     "src/f32-spmm/gen/4x4-minmax-neonfma.c",
-    "src/f32-spmm/gen/8x1-minmax-neonfma-pipelined.c",
-    "src/f32-spmm/gen/8x1-minmax-neonfma-x2.c",
-    "src/f32-spmm/gen/8x1-minmax-neonfma.c",
     "src/f32-spmm/gen/8x2-minmax-neonfma.c",
     "src/f32-spmm/gen/8x4-minmax-neonfma.c",
-    "src/f32-spmm/gen/12x1-minmax-neonfma.c",
     "src/f32-spmm/gen/12x2-minmax-neonfma.c",
     "src/f32-spmm/gen/12x4-minmax-neonfma.c",
-    "src/f32-spmm/gen/16x1-minmax-neonfma-pipelined.c",
-    "src/f32-spmm/gen/16x1-minmax-neonfma-x2.c",
-    "src/f32-spmm/gen/16x1-minmax-neonfma.c",
     "src/f32-spmm/gen/16x2-minmax-neonfma.c",
     "src/f32-spmm/gen/16x4-minmax-neonfma.c",
-    "src/f32-spmm/gen/32x1-minmax-neonfma-pipelined.c",
-    "src/f32-spmm/gen/32x1-minmax-neonfma-x2.c",
-    "src/f32-spmm/gen/32x1-minmax-neonfma.c",
     "src/f32-spmm/gen/32x2-minmax-neonfma.c",
     "src/f32-spmm/gen/32x4-minmax-neonfma.c",
     "src/f32-vbinary/gen/vdiv-minmax-neon-x4.c",
@@ -1303,16 +1342,28 @@
     "src/f32-gemm/gen-inc/1x8inc-minmax-sse-dup.c",
     "src/f32-gemm/gen-inc/1x8inc-minmax-sse-load1.c",
     "src/f32-gemm/gen-inc/1x8s4inc-minmax-sse.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-sse-dup.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-sse-load1.c",
+    "src/f32-gemm/gen-inc/3x8s4inc-minmax-sse.c",
     "src/f32-gemm/gen-inc/4x8inc-minmax-sse-dup.c",
     "src/f32-gemm/gen-inc/4x8inc-minmax-sse-load1.c",
     "src/f32-gemm/gen-inc/4x8s4inc-minmax-sse.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-sse-dup.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-sse-load1.c",
+    "src/f32-gemm/gen-inc/5x8s4inc-minmax-sse.c",
     "src/f32-gemm/gen/1x8-minmax-sse-dup.c",
     "src/f32-gemm/gen/1x8-minmax-sse-load1.c",
     "src/f32-gemm/gen/1x8s4-minmax-sse.c",
+    "src/f32-gemm/gen/3x8-minmax-sse-dup.c",
+    "src/f32-gemm/gen/3x8-minmax-sse-load1.c",
+    "src/f32-gemm/gen/3x8s4-minmax-sse.c",
     "src/f32-gemm/gen/4x2c4-minmax-sse.c",
     "src/f32-gemm/gen/4x8-minmax-sse-dup.c",
     "src/f32-gemm/gen/4x8-minmax-sse-load1.c",
     "src/f32-gemm/gen/4x8s4-minmax-sse.c",
+    "src/f32-gemm/gen/5x8-minmax-sse-dup.c",
+    "src/f32-gemm/gen/5x8-minmax-sse-load1.c",
+    "src/f32-gemm/gen/5x8s4-minmax-sse.c",
     "src/f32-hswish/gen/hswish-sse-x4.c",
     "src/f32-hswish/gen/hswish-sse-x8.c",
     "src/f32-ibilinear/gen/sse-c4.c",
@@ -1320,10 +1371,16 @@
     "src/f32-igemm/gen/1x8-minmax-sse-dup.c",
     "src/f32-igemm/gen/1x8-minmax-sse-load1.c",
     "src/f32-igemm/gen/1x8s4-minmax-sse.c",
+    "src/f32-igemm/gen/3x8-minmax-sse-dup.c",
+    "src/f32-igemm/gen/3x8-minmax-sse-load1.c",
+    "src/f32-igemm/gen/3x8s4-minmax-sse.c",
     "src/f32-igemm/gen/4x2c4-minmax-sse.c",
     "src/f32-igemm/gen/4x8-minmax-sse-dup.c",
     "src/f32-igemm/gen/4x8-minmax-sse-load1.c",
     "src/f32-igemm/gen/4x8s4-minmax-sse.c",
+    "src/f32-igemm/gen/5x8-minmax-sse-dup.c",
+    "src/f32-igemm/gen/5x8-minmax-sse-load1.c",
+    "src/f32-igemm/gen/5x8s4-minmax-sse.c",
     "src/f32-maxpool/9p8x-minmax-sse-c4.c",
     "src/f32-pavgpool/9p8x-minmax-sse-c4.c",
     "src/f32-pavgpool/9x-minmax-sse-c4.c",
@@ -1397,6 +1454,18 @@
     "src/f32-argmaxpool/4x-sse2-c4.c",
     "src/f32-argmaxpool/9p8x-sse2-c4.c",
     "src/f32-argmaxpool/9x-sse2-c4.c",
+    "src/f32-gemm/gen-inc/1x8inc-minmax-sse2-dup.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-sse2-dup.c",
+    "src/f32-gemm/gen-inc/4x8inc-minmax-sse2-dup.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-sse2-dup.c",
+    "src/f32-gemm/gen/1x8-minmax-sse2-dup.c",
+    "src/f32-gemm/gen/3x8-minmax-sse2-dup.c",
+    "src/f32-gemm/gen/4x8-minmax-sse2-dup.c",
+    "src/f32-gemm/gen/5x8-minmax-sse2-dup.c",
+    "src/f32-igemm/gen/1x8-minmax-sse2-dup.c",
+    "src/f32-igemm/gen/3x8-minmax-sse2-dup.c",
+    "src/f32-igemm/gen/4x8-minmax-sse2-dup.c",
+    "src/f32-igemm/gen/5x8-minmax-sse2-dup.c",
     "src/f32-prelu/gen/sse2-2x4.c",
     "src/f32-prelu/gen/sse2-2x8.c",
     "src/f32-raddstoreexpminusmax/gen/sse2-p5-x4.c",
@@ -1423,6 +1492,18 @@
     "src/f32-sigmoid/gen/sse2-p5-div-x16.c",
     "src/f32-sigmoid/gen/sse2-p5-div-x20.c",
     "src/f32-sigmoid/gen/sse2-p5-div-x24.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x12.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x20.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x24.c",
     "src/f32-vlrelu/gen/vlrelu-sse2-x4.c",
     "src/f32-vlrelu/gen/vlrelu-sse2-x8.c",
     "src/f32-vrnd/gen/vrndd-sse2-x4.c",
@@ -1585,6 +1666,18 @@
     "src/f32-sigmoid/gen/sse41-p5-div-x16.c",
     "src/f32-sigmoid/gen/sse41-p5-div-x20.c",
     "src/f32-sigmoid/gen/sse41-p5-div-x24.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x12.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x20.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x24.c",
     "src/f32-vlrelu/gen/vlrelu-sse41-x4.c",
     "src/f32-vlrelu/gen/vlrelu-sse41-x8.c",
     "src/f32-vrnd/gen/vrndd-sse41-x4.c",
@@ -1757,6 +1850,24 @@
     "src/f32-vbinary/gen/vsub-minmax-avx-x16.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx-x8.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx-x16.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x8.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x16.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x24.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x32.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x40.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x48.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x32.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x40.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x48.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x24.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x32.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x40.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x48.c",
     "src/f32-vlrelu/gen/vlrelu-avx-x8.c",
     "src/f32-vlrelu/gen/vlrelu-avx-x16.c",
     "src/f32-vrnd/gen/vrndd-avx-x8.c",
@@ -1959,6 +2070,46 @@
     "src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x64.c",
     "src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x72.c",
     "src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x80.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x8.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x16.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x24.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x32.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x40.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x48.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x56.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x64.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x72.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x80.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x8.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x16.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x24.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x32.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x40.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x48.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x56.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x64.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x72.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x80.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x8.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x16.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x24.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x32.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x40.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x48.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x56.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x64.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x72.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x80.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x8.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x16.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x24.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x32.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x40.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x48.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x56.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x64.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x72.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x80.c",
     "src/f32-vscaleexpminusmax/gen/avx2-p5-x8.c",
     "src/f32-vscaleexpminusmax/gen/avx2-p5-x16.c",
     "src/f32-vscaleexpminusmax/gen/avx2-p5-x24.c",
@@ -1986,11 +2137,11 @@
     "src/math/exp-avx2-rr2-lut8-p3-perm.c",
     "src/math/exp-avx2-rr2-lut8-p4-perm.c",
     "src/math/exp-avx2-rr2-p5.c",
-    "src/math/expminus-avx2-rr2-p5.c",
     "src/math/expm1minus-avx2-rr1-lut4-p4-perm.c",
     "src/math/expm1minus-avx2-rr1-lut8-p4-perm.c",
     "src/math/expm1minus-avx2-rr1-lut16-p3-gather.c",
     "src/math/expm1minus-avx2-rr1-p6.c",
+    "src/math/expminus-avx2-rr2-p5.c",
     "src/math/extexp-avx2-p5.c",
     "src/math/sigmoid-avx2-rr1-lut64-p2-gather-div.c",
     "src/math/sigmoid-avx2-rr1-lut64-p2-gather-nr1fma.c",
@@ -2187,6 +2338,22 @@
     "src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x16.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x32.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x48.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x64.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x80.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x96.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x112.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x128.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x16.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x32.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x48.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x64.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x80.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x96.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x112.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x128.c",
     "src/f32-vlrelu/gen/vlrelu-avx512f-x16.c",
     "src/f32-vlrelu/gen/vlrelu-avx512f-x32.c",
     "src/f32-vrnd/gen/vrndd-avx512f-x16.c",
@@ -2534,31 +2701,6 @@
 }
 
 cc_library_static {
-    name: "xnnpack_psimd_accmath_ukernels",
-    defaults: ["xnnpack_internal_default"],
-    srcs: PSIMD_ACCMATH_UKERNELS,
-    cflags: [
-        "-O3",
-    ],
-    arch: {
-        arm: {
-            cflags: [
-                "-marm",
-                "-mfpu=neon",
-            ],
-        },
-    },
-    header_libs: [
-        "fp16_headers",
-        "psimd_headers",
-    ],
-    static_libs: [
-        "libpthreadpool",
-        "xnnpack_tables",
-    ],
-}
-
-cc_library_static {
     name: "xnnpack_neon_ukernels",
     defaults: ["xnnpack_internal_default"],
     arch: {
@@ -2977,7 +3119,6 @@
     arch: {
         arm: {
             whole_static_libs: [
-                "xnnpack_psimd_accmath_ukernels",
                 "xnnpack_neon_ukernels",
                 "xnnpack_neonfma_ukernels",
                 "xnnpack_neonv8_ukernels",
@@ -2987,7 +3128,6 @@
         },
         arm64: {
             whole_static_libs: [
-                "xnnpack_psimd_accmath_ukernels",
                 "xnnpack_neon_ukernels",
                 "xnnpack_neonfma_ukernels",
                 "xnnpack_neonv8_ukernels",
@@ -2998,7 +3138,6 @@
         },
         x86: {
             whole_static_libs: [
-                "xnnpack_psimd_accmath_ukernels",
                 "xnnpack_sse2_ukernels",
                 "xnnpack_ssse3_ukernels",
                 "xnnpack_sse41_ukernels",
@@ -3012,7 +3151,6 @@
         },
         x86_64: {
             whole_static_libs: [
-                "xnnpack_psimd_accmath_ukernels",
                 "xnnpack_sse2_ukernels",
                 "xnnpack_ssse3_ukernels",
                 "xnnpack_sse41_ukernels",
diff --git a/BUILD.bazel b/BUILD.bazel
index 25414a0..2fb6a63 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -88,6 +88,7 @@
     "src/subgraph/depth-to-space.c",
     "src/subgraph/depthwise-convolution-2d.c",
     "src/subgraph/divide.c",
+    "src/subgraph/elu.c",
     "src/subgraph/floor.c",
     "src/subgraph/fully-connected.c",
     "src/subgraph/global-average-pooling-2d.c",
@@ -423,6 +424,18 @@
     "src/f32-vbinary/gen/vsubc-scalar-x2.c",
     "src/f32-vbinary/gen/vsubc-scalar-x4.c",
     "src/f32-vbinary/gen/vsubc-scalar-x8.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x1.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x2.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x3.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x5.c",
+    "src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x6.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x1.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x2.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x3.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x5.c",
+    "src/f32-velu/gen/velu-scalar-rr2-p6-x6.c",
     "src/f32-vlrelu/gen/vlrelu-scalar-x1.c",
     "src/f32-vlrelu/gen/vlrelu-scalar-x2.c",
     "src/f32-vlrelu/gen/vlrelu-scalar-x4.c",
@@ -694,6 +707,18 @@
     "src/f32-vbinary/gen/vsubc-relu-wasm-x2.c",
     "src/f32-vbinary/gen/vsubc-relu-wasm-x4.c",
     "src/f32-vbinary/gen/vsubc-relu-wasm-x8.c",
+    "src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x1.c",
+    "src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x2.c",
+    "src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x3.c",
+    "src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x5.c",
+    "src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x6.c",
+    "src/f32-velu/gen/velu-wasm-rr2-p6-x1.c",
+    "src/f32-velu/gen/velu-wasm-rr2-p6-x2.c",
+    "src/f32-velu/gen/velu-wasm-rr2-p6-x3.c",
+    "src/f32-velu/gen/velu-wasm-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-wasm-rr2-p6-x5.c",
+    "src/f32-velu/gen/velu-wasm-rr2-p6-x6.c",
     "src/f32-vlrelu/gen/vlrelu-wasm-x1.c",
     "src/f32-vlrelu/gen/vlrelu-wasm-x2.c",
     "src/f32-vlrelu/gen/vlrelu-wasm-x4.c",
@@ -715,136 +740,274 @@
     "src/f32-clamp/gen/wasmsimd-x86-x4.c",
     "src/f32-clamp/gen/wasmsimd-x86-x8.c",
     "src/f32-conv-hwc2chw/3x3s2p1c3x4-wasmsimd-2x2.c",
-    "src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-arm.c",
-    "src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-x86.c",
+    "src/f32-dwconv/gen/up4x4-minmax-wasmsimd-arm-acc2.c",
     "src/f32-dwconv/gen/up4x4-minmax-wasmsimd-arm.c",
+    "src/f32-dwconv/gen/up4x4-minmax-wasmsimd-x86-acc2.c",
     "src/f32-dwconv/gen/up4x4-minmax-wasmsimd-x86.c",
     "src/f32-dwconv/gen/up4x4-wasmsimd.c",
-    "src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-arm.c",
-    "src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-x86.c",
+    "src/f32-dwconv/gen/up4x9-minmax-wasmsimd-arm-acc2.c",
     "src/f32-dwconv/gen/up4x9-minmax-wasmsimd-arm.c",
+    "src/f32-dwconv/gen/up4x9-minmax-wasmsimd-x86-acc2.c",
     "src/f32-dwconv/gen/up4x9-minmax-wasmsimd-x86.c",
     "src/f32-dwconv/gen/up4x9-wasmsimd.c",
-    "src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-arm.c",
-    "src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-x86.c",
+    "src/f32-dwconv/gen/up4x25-minmax-wasmsimd-arm-acc2.c",
     "src/f32-dwconv/gen/up4x25-minmax-wasmsimd-arm.c",
+    "src/f32-dwconv/gen/up4x25-minmax-wasmsimd-x86-acc2.c",
     "src/f32-dwconv/gen/up4x25-minmax-wasmsimd-x86.c",
     "src/f32-dwconv/gen/up4x25-wasmsimd.c",
-    "src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-arm.c",
-    "src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-x86.c",
+    "src/f32-dwconv/gen/up8x4-minmax-wasmsimd-arm-acc2.c",
     "src/f32-dwconv/gen/up8x4-minmax-wasmsimd-arm.c",
+    "src/f32-dwconv/gen/up8x4-minmax-wasmsimd-x86-acc2.c",
     "src/f32-dwconv/gen/up8x4-minmax-wasmsimd-x86.c",
     "src/f32-dwconv/gen/up8x4-wasmsimd.c",
-    "src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-arm.c",
-    "src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-x86.c",
+    "src/f32-dwconv/gen/up8x9-minmax-wasmsimd-arm-acc2.c",
     "src/f32-dwconv/gen/up8x9-minmax-wasmsimd-arm.c",
+    "src/f32-dwconv/gen/up8x9-minmax-wasmsimd-x86-acc2.c",
     "src/f32-dwconv/gen/up8x9-minmax-wasmsimd-x86.c",
     "src/f32-dwconv/gen/up8x9-wasmsimd.c",
-    "src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-arm.c",
-    "src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-x86.c",
+    "src/f32-dwconv/gen/up8x25-minmax-wasmsimd-arm-acc2.c",
     "src/f32-dwconv/gen/up8x25-minmax-wasmsimd-arm.c",
+    "src/f32-dwconv/gen/up8x25-minmax-wasmsimd-x86-acc2.c",
     "src/f32-dwconv/gen/up8x25-minmax-wasmsimd-x86.c",
     "src/f32-dwconv/gen/up8x25-wasmsimd.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc2.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc3.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4-acc2.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-3x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-4x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-5x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-6x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc2.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc3.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4-acc2.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-3x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-4x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-5x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-6x4.c",
-    "src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-arm.c",
-    "src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-x86.c",
-    "src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-arm.c",
-    "src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-x86.c",
-    "src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-arm.c",
-    "src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-x86.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-5x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-6x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-5x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-6x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-5x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-6x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-5x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-6x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-5x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc5.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-5x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-5x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc5.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-5x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc5.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc5.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc3.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4-acc2.c",
+    "src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4.c",
     "src/f32-gavgpool-cw/wasmsimd-arm-x4.c",
     "src/f32-gavgpool-cw/wasmsimd-x86-x4.c",
     "src/f32-gavgpool/7p7x-minmax-wasmsimd-arm-c4.c",
     "src/f32-gavgpool/7p7x-minmax-wasmsimd-x86-c4.c",
     "src/f32-gavgpool/7x-minmax-wasmsimd-arm-c4.c",
     "src/f32-gavgpool/7x-minmax-wasmsimd-x86-c4.c",
-    "src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen-inc/1x8s4inc-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen-inc/1x8s4inc-minmax-wasmsimd-x86.c",
-    "src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen-inc/3x8s4inc-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen-inc/3x8s4inc-minmax-wasmsimd-x86.c",
-    "src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen-inc/4x8s4inc-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen-inc/4x8s4inc-minmax-wasmsimd-x86.c",
-    "src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen-inc/5x8s4inc-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen-inc/5x8s4inc-minmax-wasmsimd-x86.c",
-    "src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen-inc/6x8s4inc-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen-inc/6x8s4inc-minmax-wasmsimd-x86.c",
-    "src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen/1x8-relu-wasmsimd-splat.c",
     "src/f32-gemm/gen/1x8-wasmsimd-splat.c",
     "src/f32-gemm/gen/1x8s4-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen/1x8s4-minmax-wasmsimd-x86.c",
-    "src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen/3x8s4-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen/3x8s4-minmax-wasmsimd-x86.c",
     "src/f32-gemm/gen/4x2c4-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen/4x2c4-minmax-wasmsimd-x86.c",
     "src/f32-gemm/gen/4x2c4-relu-wasmsimd.c",
     "src/f32-gemm/gen/4x2c4-wasmsimd.c",
-    "src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen/4x8-relu-wasmsimd-splat.c",
     "src/f32-gemm/gen/4x8-wasmsimd-splat.c",
     "src/f32-gemm/gen/4x8s4-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen/4x8s4-minmax-wasmsimd-x86.c",
-    "src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen/5x8-relu-wasmsimd-splat.c",
     "src/f32-gemm/gen/5x8-wasmsimd-splat.c",
     "src/f32-gemm/gen/5x8s4-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen/5x8s4-minmax-wasmsimd-x86.c",
-    "src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-gemm/gen/6x8s4-minmax-wasmsimd-arm.c",
     "src/f32-gemm/gen/6x8s4-minmax-wasmsimd-x86.c",
     "src/f32-hswish/gen/hswish-wasmsimd-x4.c",
@@ -854,44 +1017,44 @@
     "src/f32-ibilinear-chw/gen/wasmsimd-p8.c",
     "src/f32-ibilinear/gen/wasmsimd-c4.c",
     "src/f32-ibilinear/gen/wasmsimd-c8.c",
-    "src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-igemm/gen/1x8-relu-wasmsimd-splat.c",
     "src/f32-igemm/gen/1x8-wasmsimd-splat.c",
     "src/f32-igemm/gen/1x8s4-minmax-wasmsimd-arm.c",
     "src/f32-igemm/gen/1x8s4-minmax-wasmsimd-x86.c",
-    "src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-igemm/gen/3x8s4-minmax-wasmsimd-arm.c",
     "src/f32-igemm/gen/3x8s4-minmax-wasmsimd-x86.c",
     "src/f32-igemm/gen/4x2c4-minmax-wasmsimd-arm.c",
     "src/f32-igemm/gen/4x2c4-minmax-wasmsimd-x86.c",
     "src/f32-igemm/gen/4x2c4-relu-wasmsimd.c",
     "src/f32-igemm/gen/4x2c4-wasmsimd.c",
-    "src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-igemm/gen/4x8-relu-wasmsimd-splat.c",
     "src/f32-igemm/gen/4x8-wasmsimd-splat.c",
     "src/f32-igemm/gen/4x8s4-minmax-wasmsimd-arm.c",
     "src/f32-igemm/gen/4x8s4-minmax-wasmsimd-x86.c",
-    "src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-igemm/gen/5x8-relu-wasmsimd-splat.c",
     "src/f32-igemm/gen/5x8-wasmsimd-splat.c",
     "src/f32-igemm/gen/5x8s4-minmax-wasmsimd-arm.c",
     "src/f32-igemm/gen/5x8s4-minmax-wasmsimd-x86.c",
-    "src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c",
-    "src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c",
-    "src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c",
+    "src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c",
+    "src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-igemm/gen/6x8s4-minmax-wasmsimd-arm.c",
     "src/f32-igemm/gen/6x8s4-minmax-wasmsimd-x86.c",
     "src/f32-maxpool/9p8x-minmax-wasmsimd-arm-c4.c",
@@ -900,8 +1063,8 @@
     "src/f32-pavgpool/9p8x-minmax-wasmsimd-x86-c4.c",
     "src/f32-pavgpool/9x-minmax-wasmsimd-arm-c4.c",
     "src/f32-pavgpool/9x-minmax-wasmsimd-x86-c4.c",
-    "src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-arm.c",
-    "src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-x86.c",
+    "src/f32-ppmm/gen/4x8-minmax-wasmsimd-arm-splat.c",
+    "src/f32-ppmm/gen/4x8-minmax-wasmsimd-x86-splat.c",
     "src/f32-prelu/gen/wasmsimd-bitselect-1x4.c",
     "src/f32-prelu/gen/wasmsimd-bitselect-1x8.c",
     "src/f32-prelu/gen/wasmsimd-bitselect-1x16.c",
@@ -1139,6 +1302,30 @@
     "src/f32-vbinary/gen/vsubc-wasmsimd-x4.c",
     "src/f32-vbinary/gen/vsubc-wasmsimd-x8.c",
     "src/f32-vbinary/gen/vsubc-wasmsimd-x16.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x12.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x20.c",
+    "src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x24.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x12.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x20.c",
+    "src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x24.c",
     "src/f32-vlrelu/gen/vlrelu-wasmsimd-bitselect-x4.c",
     "src/f32-vlrelu/gen/vlrelu-wasmsimd-bitselect-x8.c",
     "src/f32-vlrelu/gen/vlrelu-wasmsimd-minmax-x4.c",
@@ -1228,13 +1415,6 @@
     "src/x32-zip/xm-wasmsimd.c",
 ]
 
-PSIMD_ACCMATH_UKERNELS = [
-    "src/qs8-requantization/fp32-psimd.c",
-    "src/qs8-requantization/precise-psimd.c",
-    "src/qu8-requantization/fp32-psimd.c",
-    "src/qu8-requantization/precise-psimd.c",
-]
-
 # ISA-specific micro-kernels
 NEON_UKERNELS = [
     "src/f32-argmaxpool/4x-neon-c4.c",
@@ -1252,6 +1432,7 @@
     "src/f32-conv-hwc/gen/3x3s2p1c3x4-neon-2x2.c",
     "src/f32-conv-hwc/gen/3x3s2p1c3x8-neon-2x1.c",
     "src/f32-conv-hwc/gen/3x3s2p1c3x8-neon-2x2.c",
+    "src/f32-conv-hwc2chw/3x3s2p1c3x4-neon-2x2.c",
     "src/f32-dwconv/gen/up4x4-minmax-neon-acc2.c",
     "src/f32-dwconv/gen/up4x4-minmax-neon.c",
     "src/f32-dwconv/gen/up4x9-minmax-neon-acc2.c",
@@ -1401,7 +1582,6 @@
     "src/f32-relu/gen/neon-x4.c",
     "src/f32-relu/gen/neon-x8.c",
     "src/f32-rmax/neon.c",
-    "src/f32-sigmoid/gen/neon-frac-p9-p10-nr1recps-x16.c",
     "src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x4.c",
     "src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x8.c",
     "src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x12.c",
@@ -1420,6 +1600,19 @@
     "src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x16.c",
     "src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x20.c",
     "src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x24.c",
+    "src/f32-spmm/gen/4x1-minmax-neon-pipelined.c",
+    "src/f32-spmm/gen/4x1-minmax-neon-x2.c",
+    "src/f32-spmm/gen/4x1-minmax-neon.c",
+    "src/f32-spmm/gen/8x1-minmax-neon-pipelined.c",
+    "src/f32-spmm/gen/8x1-minmax-neon-x2.c",
+    "src/f32-spmm/gen/8x1-minmax-neon.c",
+    "src/f32-spmm/gen/12x1-minmax-neon.c",
+    "src/f32-spmm/gen/16x1-minmax-neon-pipelined.c",
+    "src/f32-spmm/gen/16x1-minmax-neon-x2.c",
+    "src/f32-spmm/gen/16x1-minmax-neon.c",
+    "src/f32-spmm/gen/32x1-minmax-neon-pipelined.c",
+    "src/f32-spmm/gen/32x1-minmax-neon-x2.c",
+    "src/f32-spmm/gen/32x1-minmax-neon.c",
     "src/f32-vbinary/gen/vadd-minmax-neon-x4.c",
     "src/f32-vbinary/gen/vadd-minmax-neon-x8.c",
     "src/f32-vbinary/gen/vaddc-minmax-neon-x4.c",
@@ -1446,6 +1639,18 @@
     "src/f32-vbinary/gen/vsub-minmax-neon-x8.c",
     "src/f32-vbinary/gen/vsubc-minmax-neon-x4.c",
     "src/f32-vbinary/gen/vsubc-minmax-neon-x8.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-neon-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x12.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x20.c",
+    "src/f32-velu/gen/velu-neon-rr2-p6-x24.c",
     "src/f32-vlrelu/gen/vlrelu-neon-x4.c",
     "src/f32-vlrelu/gen/vlrelu-neon-x8.c",
     "src/f32-vmulcaddc/gen/c4-minmax-neon-2x.c",
@@ -1473,10 +1678,6 @@
     "src/math/roundu-neon-cvt.c",
     "src/math/roundz-neon-addsub.c",
     "src/math/roundz-neon-cvt.c",
-    "src/math/sigmoid-neon-frac-p9-p10-nr1recps.c",
-    "src/math/sigmoid-neon-rr1-lut64-p2-nr2recps.c",
-    "src/math/sigmoid-neon-rr1-lut2048-p1-nr2recps.c",
-    "src/math/sigmoid-neon-rr1-p5-nr2recps.c",
     "src/math/sigmoid-neon-rr2-lut64-p2-nr2recps.c",
     "src/math/sigmoid-neon-rr2-lut2048-p1-nr2recps.c",
     "src/math/sigmoid-neon-rr2-p5-nr2recps.c",
@@ -1666,6 +1867,31 @@
     "src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x16.c",
     "src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x20.c",
     "src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x24.c",
+    "src/f32-spmm/gen/4x1-minmax-neonfma-pipelined.c",
+    "src/f32-spmm/gen/4x1-minmax-neonfma-x2.c",
+    "src/f32-spmm/gen/4x1-minmax-neonfma.c",
+    "src/f32-spmm/gen/8x1-minmax-neonfma-pipelined.c",
+    "src/f32-spmm/gen/8x1-minmax-neonfma-x2.c",
+    "src/f32-spmm/gen/8x1-minmax-neonfma.c",
+    "src/f32-spmm/gen/12x1-minmax-neonfma.c",
+    "src/f32-spmm/gen/16x1-minmax-neonfma-pipelined.c",
+    "src/f32-spmm/gen/16x1-minmax-neonfma-x2.c",
+    "src/f32-spmm/gen/16x1-minmax-neonfma.c",
+    "src/f32-spmm/gen/32x1-minmax-neonfma-pipelined.c",
+    "src/f32-spmm/gen/32x1-minmax-neonfma-x2.c",
+    "src/f32-spmm/gen/32x1-minmax-neonfma.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x4.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x8.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x12.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x16.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x20.c",
+    "src/f32-velu/gen/velu-neonfma-rr1-p6-x24.c",
     "src/f32-vmulcaddc/gen/c4-minmax-neonfma-2x.c",
     "src/f32-vmulcaddc/gen/c8-minmax-neonfma-2x.c",
     "src/f32-vsqrt/gen/neonfma-nr1rsqrts1fma1adj-x4.c",
@@ -1690,11 +1916,11 @@
     "src/f32-vsqrt/gen/neonfma-nr2fma1adj-x40.c",
     "src/math/exp-neonfma-rr2-lut64-p2.c",
     "src/math/exp-neonfma-rr2-p5.c",
+    "src/math/expm1minus-neonfma-rr1-lut16-p3.c",
+    "src/math/expm1minus-neonfma-rr1-p6.c",
     "src/math/expminus-neonfma-rr2-lut64-p2.c",
     "src/math/expminus-neonfma-rr2-lut2048-p1.c",
     "src/math/expminus-neonfma-rr2-p5.c",
-    "src/math/expm1minus-neonfma-rr1-lut16-p3.c",
-    "src/math/expm1minus-neonfma-rr1-p6.c",
     "src/math/sigmoid-neonfma-rr1-lut64-p2-nr1recps1fma.c",
     "src/math/sigmoid-neonfma-rr1-lut64-p2-nr2fma.c",
     "src/math/sigmoid-neonfma-rr1-lut64-p2-nr2recps.c",
@@ -1809,27 +2035,14 @@
     "src/f32-sigmoid/gen/neonfma-rr1-p5-div-x16.c",
     "src/f32-sigmoid/gen/neonfma-rr1-p5-div-x20.c",
     "src/f32-sigmoid/gen/neonfma-rr1-p5-div-x24.c",
-    "src/f32-spmm/gen/4x1-minmax-neonfma-pipelined.c",
-    "src/f32-spmm/gen/4x1-minmax-neonfma-x2.c",
-    "src/f32-spmm/gen/4x1-minmax-neonfma.c",
     "src/f32-spmm/gen/4x2-minmax-neonfma.c",
     "src/f32-spmm/gen/4x4-minmax-neonfma.c",
-    "src/f32-spmm/gen/8x1-minmax-neonfma-pipelined.c",
-    "src/f32-spmm/gen/8x1-minmax-neonfma-x2.c",
-    "src/f32-spmm/gen/8x1-minmax-neonfma.c",
     "src/f32-spmm/gen/8x2-minmax-neonfma.c",
     "src/f32-spmm/gen/8x4-minmax-neonfma.c",
-    "src/f32-spmm/gen/12x1-minmax-neonfma.c",
     "src/f32-spmm/gen/12x2-minmax-neonfma.c",
     "src/f32-spmm/gen/12x4-minmax-neonfma.c",
-    "src/f32-spmm/gen/16x1-minmax-neonfma-pipelined.c",
-    "src/f32-spmm/gen/16x1-minmax-neonfma-x2.c",
-    "src/f32-spmm/gen/16x1-minmax-neonfma.c",
     "src/f32-spmm/gen/16x2-minmax-neonfma.c",
     "src/f32-spmm/gen/16x4-minmax-neonfma.c",
-    "src/f32-spmm/gen/32x1-minmax-neonfma-pipelined.c",
-    "src/f32-spmm/gen/32x1-minmax-neonfma-x2.c",
-    "src/f32-spmm/gen/32x1-minmax-neonfma.c",
     "src/f32-spmm/gen/32x2-minmax-neonfma.c",
     "src/f32-spmm/gen/32x4-minmax-neonfma.c",
     "src/f32-vbinary/gen/vdiv-minmax-neon-x4.c",
@@ -2037,16 +2250,28 @@
     "src/f32-gemm/gen-inc/1x8inc-minmax-sse-dup.c",
     "src/f32-gemm/gen-inc/1x8inc-minmax-sse-load1.c",
     "src/f32-gemm/gen-inc/1x8s4inc-minmax-sse.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-sse-dup.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-sse-load1.c",
+    "src/f32-gemm/gen-inc/3x8s4inc-minmax-sse.c",
     "src/f32-gemm/gen-inc/4x8inc-minmax-sse-dup.c",
     "src/f32-gemm/gen-inc/4x8inc-minmax-sse-load1.c",
     "src/f32-gemm/gen-inc/4x8s4inc-minmax-sse.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-sse-dup.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-sse-load1.c",
+    "src/f32-gemm/gen-inc/5x8s4inc-minmax-sse.c",
     "src/f32-gemm/gen/1x8-minmax-sse-dup.c",
     "src/f32-gemm/gen/1x8-minmax-sse-load1.c",
     "src/f32-gemm/gen/1x8s4-minmax-sse.c",
+    "src/f32-gemm/gen/3x8-minmax-sse-dup.c",
+    "src/f32-gemm/gen/3x8-minmax-sse-load1.c",
+    "src/f32-gemm/gen/3x8s4-minmax-sse.c",
     "src/f32-gemm/gen/4x2c4-minmax-sse.c",
     "src/f32-gemm/gen/4x8-minmax-sse-dup.c",
     "src/f32-gemm/gen/4x8-minmax-sse-load1.c",
     "src/f32-gemm/gen/4x8s4-minmax-sse.c",
+    "src/f32-gemm/gen/5x8-minmax-sse-dup.c",
+    "src/f32-gemm/gen/5x8-minmax-sse-load1.c",
+    "src/f32-gemm/gen/5x8s4-minmax-sse.c",
     "src/f32-hswish/gen/hswish-sse-x4.c",
     "src/f32-hswish/gen/hswish-sse-x8.c",
     "src/f32-ibilinear/gen/sse-c4.c",
@@ -2054,10 +2279,16 @@
     "src/f32-igemm/gen/1x8-minmax-sse-dup.c",
     "src/f32-igemm/gen/1x8-minmax-sse-load1.c",
     "src/f32-igemm/gen/1x8s4-minmax-sse.c",
+    "src/f32-igemm/gen/3x8-minmax-sse-dup.c",
+    "src/f32-igemm/gen/3x8-minmax-sse-load1.c",
+    "src/f32-igemm/gen/3x8s4-minmax-sse.c",
     "src/f32-igemm/gen/4x2c4-minmax-sse.c",
     "src/f32-igemm/gen/4x8-minmax-sse-dup.c",
     "src/f32-igemm/gen/4x8-minmax-sse-load1.c",
     "src/f32-igemm/gen/4x8s4-minmax-sse.c",
+    "src/f32-igemm/gen/5x8-minmax-sse-dup.c",
+    "src/f32-igemm/gen/5x8-minmax-sse-load1.c",
+    "src/f32-igemm/gen/5x8s4-minmax-sse.c",
     "src/f32-maxpool/9p8x-minmax-sse-c4.c",
     "src/f32-pavgpool/9p8x-minmax-sse-c4.c",
     "src/f32-pavgpool/9x-minmax-sse-c4.c",
@@ -2131,6 +2362,18 @@
     "src/f32-argmaxpool/4x-sse2-c4.c",
     "src/f32-argmaxpool/9p8x-sse2-c4.c",
     "src/f32-argmaxpool/9x-sse2-c4.c",
+    "src/f32-gemm/gen-inc/1x8inc-minmax-sse2-dup.c",
+    "src/f32-gemm/gen-inc/3x8inc-minmax-sse2-dup.c",
+    "src/f32-gemm/gen-inc/4x8inc-minmax-sse2-dup.c",
+    "src/f32-gemm/gen-inc/5x8inc-minmax-sse2-dup.c",
+    "src/f32-gemm/gen/1x8-minmax-sse2-dup.c",
+    "src/f32-gemm/gen/3x8-minmax-sse2-dup.c",
+    "src/f32-gemm/gen/4x8-minmax-sse2-dup.c",
+    "src/f32-gemm/gen/5x8-minmax-sse2-dup.c",
+    "src/f32-igemm/gen/1x8-minmax-sse2-dup.c",
+    "src/f32-igemm/gen/3x8-minmax-sse2-dup.c",
+    "src/f32-igemm/gen/4x8-minmax-sse2-dup.c",
+    "src/f32-igemm/gen/5x8-minmax-sse2-dup.c",
     "src/f32-prelu/gen/sse2-2x4.c",
     "src/f32-prelu/gen/sse2-2x8.c",
     "src/f32-raddstoreexpminusmax/gen/sse2-p5-x4.c",
@@ -2157,6 +2400,18 @@
     "src/f32-sigmoid/gen/sse2-p5-div-x16.c",
     "src/f32-sigmoid/gen/sse2-p5-div-x20.c",
     "src/f32-sigmoid/gen/sse2-p5-div-x24.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x12.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x20.c",
+    "src/f32-velu/gen/velu-sse2-rr2-p6-x24.c",
     "src/f32-vlrelu/gen/vlrelu-sse2-x4.c",
     "src/f32-vlrelu/gen/vlrelu-sse2-x8.c",
     "src/f32-vrnd/gen/vrndd-sse2-x4.c",
@@ -2319,6 +2574,18 @@
     "src/f32-sigmoid/gen/sse41-p5-div-x16.c",
     "src/f32-sigmoid/gen/sse41-p5-div-x20.c",
     "src/f32-sigmoid/gen/sse41-p5-div-x24.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x12.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x20.c",
+    "src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x4.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x12.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x20.c",
+    "src/f32-velu/gen/velu-sse41-rr2-p6-x24.c",
     "src/f32-vlrelu/gen/vlrelu-sse41-x4.c",
     "src/f32-vlrelu/gen/vlrelu-sse41-x8.c",
     "src/f32-vrnd/gen/vrndd-sse41-x4.c",
@@ -2491,6 +2758,24 @@
     "src/f32-vbinary/gen/vsub-minmax-avx-x16.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx-x8.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx-x16.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x8.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x16.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x24.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x32.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x40.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x48.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x8.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x16.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x24.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x32.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x40.c",
+    "src/f32-velu/gen/velu-avx-rr2-lut16-p3-x48.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x8.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x16.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x24.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x32.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x40.c",
+    "src/f32-velu/gen/velu-avx-rr2-p6-x48.c",
     "src/f32-vlrelu/gen/vlrelu-avx-x8.c",
     "src/f32-vlrelu/gen/vlrelu-avx-x16.c",
     "src/f32-vrnd/gen/vrndd-avx-x8.c",
@@ -2693,6 +2978,46 @@
     "src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x64.c",
     "src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x72.c",
     "src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x80.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x8.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x16.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x24.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x32.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x40.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x48.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x56.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x64.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x72.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x80.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x8.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x16.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x24.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x32.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x40.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x48.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x56.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x64.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x72.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x80.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x8.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x16.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x24.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x32.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x40.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x48.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x56.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x64.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x72.c",
+    "src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x80.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x8.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x16.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x24.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x32.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x40.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x48.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x56.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x64.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x72.c",
+    "src/f32-velu/gen/velu-avx2-rr1-p6-x80.c",
     "src/f32-vscaleexpminusmax/gen/avx2-p5-x8.c",
     "src/f32-vscaleexpminusmax/gen/avx2-p5-x16.c",
     "src/f32-vscaleexpminusmax/gen/avx2-p5-x24.c",
@@ -2720,11 +3045,11 @@
     "src/math/exp-avx2-rr2-lut8-p3-perm.c",
     "src/math/exp-avx2-rr2-lut8-p4-perm.c",
     "src/math/exp-avx2-rr2-p5.c",
-    "src/math/expminus-avx2-rr2-p5.c",
     "src/math/expm1minus-avx2-rr1-lut4-p4-perm.c",
     "src/math/expm1minus-avx2-rr1-lut8-p4-perm.c",
     "src/math/expm1minus-avx2-rr1-lut16-p3-gather.c",
     "src/math/expm1minus-avx2-rr1-p6.c",
+    "src/math/expminus-avx2-rr2-p5.c",
     "src/math/extexp-avx2-p5.c",
     "src/math/sigmoid-avx2-rr1-lut64-p2-gather-div.c",
     "src/math/sigmoid-avx2-rr1-lut64-p2-gather-nr1fma.c",
@@ -2921,6 +3246,22 @@
     "src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c",
     "src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x16.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x32.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x48.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x64.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x80.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x96.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x112.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x128.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x16.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x32.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x48.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x64.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x80.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x96.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x112.c",
+    "src/f32-velu/gen/velu-avx512f-rr1-p6-x128.c",
     "src/f32-vlrelu/gen/vlrelu-avx512f-x16.c",
     "src/f32-vlrelu/gen/vlrelu-avx512f-x32.c",
     "src/f32-vrnd/gen/vrndd-avx512f-x16.c",
@@ -3308,54 +3649,6 @@
 )
 
 xnnpack_cc_library(
-    name = "psimd_accmath_ukernels",
-    hdrs = INTERNAL_HDRS,
-    aarch32_copts = [
-        "-marm",
-        "-mfpu=neon",
-    ],
-    gcc_copts = xnnpack_gcc_std_copts(),
-    gcc_x86_copts = ["-msse2"],
-    msvc_copts = xnnpack_msvc_std_copts(),
-    optimized_copts = [
-        "-O3",
-    ],
-    psimd_srcs = PSIMD_ACCMATH_UKERNELS,
-    deps = [
-        ":tables",
-        "@FP16",
-        "@psimd",
-        "@pthreadpool",
-    ],
-)
-
-xnnpack_cc_library(
-    name = "psimd_accmath_ukernels_test_mode",
-    hdrs = INTERNAL_HDRS,
-    aarch32_copts = [
-        "-marm",
-        "-mfpu=neon",
-    ],
-    copts = [
-        "-UNDEBUG",
-        "-DXNN_TEST_MODE=1",
-    ],
-    gcc_copts = xnnpack_gcc_std_copts(),
-    gcc_x86_copts = ["-msse2"],
-    msvc_copts = xnnpack_msvc_std_copts(),
-    optimized_copts = [
-        "-O3",
-    ],
-    psimd_srcs = PSIMD_ACCMATH_UKERNELS,
-    deps = [
-        ":tables",
-        "@FP16",
-        "@psimd",
-        "@pthreadpool",
-    ],
-)
-
-xnnpack_cc_library(
     name = "neon_ukernels",
     hdrs = INTERNAL_HDRS,
     aarch32_copts = [
@@ -3968,9 +4261,6 @@
     generic_deps = [
         ":scalar_ukernels",
     ],
-    psimd_deps = [
-        ":psimd_accmath_ukernels",
-    ],
     wasm_deps = [
         ":wasm_ukernels",
         ":asm_ukernels",
@@ -4012,9 +4302,6 @@
     generic_deps = [
         ":scalar_ukernels_test_mode",
     ],
-    psimd_deps = [
-        ":psimd_accmath_ukernels_test_mode",
-    ],
     wasm_deps = [
         ":wasm_ukernels_test_mode",
         ":asm_ukernels",
@@ -4056,9 +4343,6 @@
     generic_deps = [
         ":scalar_ukernels",
     ],
-    psimd_deps = [
-        ":psimd_accmath_ukernels",
-    ],
     wasm_deps = [
         ":wasm_ukernels",
     ],
@@ -4242,7 +4526,7 @@
     defines = select({
         ":xnn_enable_sparse_explicit_true": ["XNN_ENABLE_SPARSE=1"],
         ":xnn_enable_sparse_explicit_false": ["XNN_ENABLE_SPARSE=0"],
-        "//conditions:default": ["XNN_ENABLE_SPARSE=0"],
+        "//conditions:default": ["XNN_ENABLE_SPARSE=1"],
     }),
 )
 
@@ -4563,7 +4847,7 @@
     name = "f16_spmm_bench",
     srcs = [
         "bench/f16-spmm.cc",
-        "bench/gemm.h",
+        "bench/spmm.h",
         "src/xnnpack/AlignedAllocator.h",
     ] + MICROKERNEL_BENCHMARK_HDRS,
     deps = MICROKERNEL_BENCHMARK_DEPS,
@@ -4733,7 +5017,7 @@
     name = "f32_spmm_bench",
     srcs = [
         "bench/f32-spmm.cc",
-        "bench/gemm.h",
+        "bench/spmm.h",
         "src/xnnpack/AlignedAllocator.h",
     ] + MICROKERNEL_BENCHMARK_HDRS,
     deps = MICROKERNEL_BENCHMARK_DEPS,
@@ -4749,6 +5033,15 @@
 )
 
 xnnpack_benchmark(
+    name = "f32_velu_bench",
+    srcs = [
+        "bench/f32-velu.cc",
+        "src/xnnpack/AlignedAllocator.h",
+    ] + MICROKERNEL_BENCHMARK_HDRS,
+    deps = MICROKERNEL_BENCHMARK_DEPS,
+)
+
+xnnpack_benchmark(
     name = "f32_vscaleexpminusmax_bench",
     srcs = [
         "bench/f32-vscaleexpminusmax.cc",
@@ -4847,6 +5140,14 @@
 )
 
 xnnpack_benchmark(
+    name = "elu_bench",
+    srcs = ["bench/elu.cc"],
+    copts = xnnpack_optional_tflite_copts(),
+    tags = ["nowin32"],
+    deps = OPERATOR_BENCHMARK_DEPS + xnnpack_optional_tflite_deps(),
+)
+
+xnnpack_benchmark(
     name = "floor_bench",
     srcs = ["bench/floor.cc"],
     copts = xnnpack_optional_tflite_copts(),
@@ -5091,41 +5392,110 @@
 #################### Accuracy evaluation for math functions ####################
 
 xnnpack_benchmark(
-    name = "f32_exp_eval",
+    name = "f32_exp_ulp_eval",
     srcs = [
-        "eval/f32-exp.cc",
-        "src/xnnpack/AlignedAllocator.h",
-    ] + ACCURACY_EVAL_HDRS,
-    deps = ACCURACY_EVAL_DEPS,
-)
-
-xnnpack_benchmark(
-    name = "f32_expminus_eval",
-    srcs = [
-        "eval/f32-expminus.cc",
-        "src/xnnpack/AlignedAllocator.h",
-    ] + ACCURACY_EVAL_HDRS,
-    deps = ACCURACY_EVAL_DEPS,
-)
-
-xnnpack_benchmark(
-    name = "f32_expm1minus_eval",
-    srcs = [
-        "eval/f32-expm1minus.cc",
+        "eval/f32-exp-ulp.cc",
         "src/xnnpack/AlignedAllocator.h",
     ] + ACCURACY_EVAL_HDRS,
     deps = ACCURACY_EVAL_DEPS + [
         ":bench_utils",
+        "@cpuinfo",
     ],
 )
 
 xnnpack_benchmark(
-    name = "f32_extexp_eval",
+    name = "f32_expminus_ulp_eval",
     srcs = [
-        "eval/f32-extexp.cc",
+        "eval/f32-expminus-ulp.cc",
         "src/xnnpack/AlignedAllocator.h",
     ] + ACCURACY_EVAL_HDRS,
-    deps = ACCURACY_EVAL_DEPS,
+    deps = ACCURACY_EVAL_DEPS + [
+        ":bench_utils",
+        "@cpuinfo",
+    ],
+)
+
+xnnpack_benchmark(
+    name = "f32_expm1minus_ulp_eval",
+    srcs = [
+        "eval/f32-expm1minus-ulp.cc",
+        "src/xnnpack/AlignedAllocator.h",
+    ] + ACCURACY_EVAL_HDRS,
+    deps = ACCURACY_EVAL_DEPS + [
+        ":bench_utils",
+        "@cpuinfo",
+    ],
+)
+
+xnnpack_benchmark(
+    name = "f32_extexp_ulp_eval",
+    srcs = [
+        "eval/f32-extexp-ulp.cc",
+        "src/xnnpack/AlignedAllocator.h",
+    ] + ACCURACY_EVAL_HDRS,
+    deps = ACCURACY_EVAL_DEPS + [
+        ":bench_utils",
+        "@cpuinfo",
+    ],
+)
+
+xnnpack_benchmark(
+    name = "f32_sigmoid_ulp_eval",
+    srcs = [
+        "eval/f32-sigmoid-ulp.cc",
+        "src/xnnpack/AlignedAllocator.h",
+    ] + ACCURACY_EVAL_HDRS,
+    deps = ACCURACY_EVAL_DEPS + [
+        ":bench_utils",
+        "@cpuinfo",
+    ],
+)
+
+xnnpack_benchmark(
+    name = "f32_sqrt_ulp_eval",
+    srcs = [
+        "eval/f32-sqrt-ulp.cc",
+        "src/xnnpack/AlignedAllocator.h",
+    ] + ACCURACY_EVAL_HDRS,
+    deps = ACCURACY_EVAL_DEPS + [
+        ":bench_utils",
+        "@cpuinfo",
+    ],
+)
+
+################### Accuracy verification for math functions ##################
+
+xnnpack_unit_test(
+    name = "f32_exp_eval",
+    srcs = [
+        "eval/f32-exp.cc",
+        "src/xnnpack/AlignedAllocator.h",
+        "src/xnnpack/math-stubs.h",
+    ] + MICROKERNEL_TEST_HDRS,
+    automatic = False,
+    deps = MICROKERNEL_TEST_DEPS,
+)
+
+xnnpack_unit_test(
+    name = "f32_expm1minus_eval",
+    srcs = [
+        "eval/f32-expm1minus.cc",
+        "src/xnnpack/AlignedAllocator.h",
+        "src/xnnpack/math-stubs.h",
+    ] + MICROKERNEL_TEST_HDRS,
+    automatic = False,
+    deps = MICROKERNEL_TEST_DEPS,
+)
+
+xnnpack_unit_test(
+    name = "f32_expminus_eval",
+    srcs = [
+        "eval/f32-expminus.cc",
+        "src/xnnpack/AlignedAllocator.h",
+        "src/xnnpack/math-stubs.h",
+    ] + MICROKERNEL_TEST_HDRS,
+    automatic = False,
+    deps = MICROKERNEL_TEST_DEPS,
 )
 
 xnnpack_unit_test(
@@ -5172,26 +5542,6 @@
     deps = MICROKERNEL_TEST_DEPS,
 )
 
-xnnpack_benchmark(
-    name = "f32_sigmoid_eval",
-    srcs = [
-        "eval/f32-sigmoid.cc",
-        "src/xnnpack/AlignedAllocator.h",
-    ] + ACCURACY_EVAL_HDRS,
-    deps = ACCURACY_EVAL_DEPS,
-)
-
-xnnpack_benchmark(
-    name = "f32_sqrt_eval",
-    srcs = [
-        "eval/f32-sqrt.cc",
-        "src/xnnpack/AlignedAllocator.h",
-    ] + ACCURACY_EVAL_HDRS,
-    deps = ACCURACY_EVAL_DEPS + [
-        ":bench_utils",
-    ],
-)
-
 ######################### Unit tests for micro-kernels #########################
 
 xnnpack_unit_test(
@@ -5844,6 +6194,15 @@
 )
 
 xnnpack_unit_test(
+    name = "f32_velu_test",
+    srcs = [
+        "test/f32-velu.cc",
+        "test/vunary-microkernel-tester.h",
+    ] + MICROKERNEL_TEST_HDRS,
+    deps = MICROKERNEL_TEST_DEPS,
+)
+
+xnnpack_unit_test(
     name = "f32_vmax_test",
     srcs = [
         "test/f32-vmax.cc",
@@ -6523,6 +6882,7 @@
 
 xnnpack_unit_test(
     name = "deconvolution_nhwc_test",
+    timeout = "moderate",
     srcs = [
         "test/deconvolution-nhwc.cc",
         "test/deconvolution-operator-tester.h",
@@ -6558,6 +6918,15 @@
 )
 
 xnnpack_unit_test(
+    name = "elu_nc_test",
+    srcs = [
+        "test/elu-nc.cc",
+        "test/elu-operator-tester.h",
+    ],
+    deps = OPERATOR_TEST_DEPS,
+)
+
+xnnpack_unit_test(
     name = "fully_connected_nc_test",
     srcs = [
         "test/fully-connected-nc.cc",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 17dcac0..a81cfc6 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -16,10 +16,17 @@
 SET_PROPERTY(CACHE XNNPACK_LIBRARY_TYPE PROPERTY STRINGS default static shared)
 OPTION(XNNPACK_ENABLE_ASSEMBLY "Build XNNPACK with assembly micro-kernels" ON)
 OPTION(XNNPACK_ENABLE_MEMOPT "Build XNNPACK with optimized memory allocation scheme" ON)
+OPTION(XNNPACK_ENABLE_SPARSE "Build XNNPACK with graph rewriting for sparse inference" ON)
 OPTION(XNNPACK_BUILD_TESTS "Build XNNPACK unit tests" ON)
 OPTION(XNNPACK_BUILD_BENCHMARKS "Build XNNPACK benchmarks" ON)
 OPTION(XNNPACK_USE_SYSTEM_LIBS "Use system-provided dependency libraries" OFF)
 
+# --- [ Determine target processor
+SET(XNNPACK_TARGET_PROCESSOR "${CMAKE_SYSTEM_PROCESSOR}")
+IF(CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_OSX_ARCHITECTURES MATCHES "^(x86_64|arm64|arm64e)$")
+  SET(XNNPACK_TARGET_PROCESSOR "${CMAKE_OSX_ARCHITECTURES}")
+ENDIF()
+
 # ---[ CMake options
 INCLUDE(GNUInstallDirs)
 
@@ -39,6 +46,12 @@
   ADD_DEFINITIONS(-DXNN_ENABLE_MEMOPT=0)
 ENDIF()
 
+IF(XNNPACK_ENABLE_SPARSE)
+  ADD_DEFINITIONS(-DXNN_ENABLE_SPARSE=1)
+ELSE()
+  ADD_DEFINITIONS(-DXNN_ENABLE_SPARSE=0)
+ENDIF()
+
 IF(CMAKE_C_COMPILER_ID STREQUAL "MSVC")
   # Disable "unary minus operator applied to unsigned type, result still unsigned" warning
   ADD_COMPILE_OPTIONS("/wd4146")
@@ -63,8 +76,8 @@
   ELSE()
     MESSAGE(FATAL_ERROR "CMAKE_SYSTEM_PROCESSOR is not defined")
   ENDIF()
-ELSEIF(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "^(i[3-6]86|x86|x86_64|AMD64|armv[5-8].*|aarch64|arm64)$")
-  MESSAGE(FATAL_ERROR "Unrecognized CMAKE_SYSTEM_PROCESSOR = ${CMAKE_SYSTEM_PROCESSOR}")
+ELSEIF(NOT XNNPACK_TARGET_PROCESSOR MATCHES "^(i[3-6]86|x86|x86_64|AMD64|armv[5-8].*|aarch64|arm64.*)$")
+  MESSAGE(FATAL_ERROR "Unrecognized XNNPACK_TARGET_PROCESSOR = ${XNNPACK_TARGET_PROCESSOR}")
 ENDIF()
 
 IF(NOT CMAKE_SYSTEM_NAME)
@@ -115,16 +128,6 @@
     SET(FXDIV_SOURCE_DIR "${CMAKE_BINARY_DIR}/FXdiv-source" CACHE STRING "FXdiv source directory")
   ENDIF()
 
-  IF(NOT DEFINED PSIMD_SOURCE_DIR)
-    MESSAGE(STATUS "Downloading PSimd to ${CMAKE_BINARY_DIR}/psimd-source (define PSIMD_SOURCE_DIR to avoid it)")
-    CONFIGURE_FILE(cmake/DownloadPSimd.cmake "${CMAKE_BINARY_DIR}/psimd-download/CMakeLists.txt")
-    EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
-      WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/psimd-download")
-    EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" --build .
-      WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/psimd-download")
-    SET(PSIMD_SOURCE_DIR "${CMAKE_BINARY_DIR}/psimd-source" CACHE STRING "PSimd source directory")
-  ENDIF()
-
   IF(NOT DEFINED PTHREADPOOL_SOURCE_DIR)
     MESSAGE(STATUS "Downloading pthreadpool to ${CMAKE_BINARY_DIR}/pthreadpool-source (define PTHREADPOOL_SOURCE_DIR to avoid it)")
     CONFIGURE_FILE(cmake/DownloadPThreadPool.cmake "${CMAKE_BINARY_DIR}/pthreadpool-download/CMakeLists.txt")
@@ -194,6 +197,7 @@
   src/subgraph/depth-to-space.c
   src/subgraph/depthwise-convolution-2d.c
   src/subgraph/divide.c
+  src/subgraph/elu.c
   src/subgraph/floor.c
   src/subgraph/fully-connected.c
   src/subgraph/global-average-pooling-2d.c
@@ -546,6 +550,18 @@
   src/f32-vbinary/gen/vsubc-scalar-x2.c
   src/f32-vbinary/gen/vsubc-scalar-x4.c
   src/f32-vbinary/gen/vsubc-scalar-x8.c
+  src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x1.c
+  src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x2.c
+  src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x3.c
+  src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x4.c
+  src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x5.c
+  src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x6.c
+  src/f32-velu/gen/velu-scalar-rr2-p6-x1.c
+  src/f32-velu/gen/velu-scalar-rr2-p6-x2.c
+  src/f32-velu/gen/velu-scalar-rr2-p6-x3.c
+  src/f32-velu/gen/velu-scalar-rr2-p6-x4.c
+  src/f32-velu/gen/velu-scalar-rr2-p6-x5.c
+  src/f32-velu/gen/velu-scalar-rr2-p6-x6.c
   src/f32-vlrelu/gen/vlrelu-scalar-x1.c
   src/f32-vlrelu/gen/vlrelu-scalar-x2.c
   src/f32-vlrelu/gen/vlrelu-scalar-x4.c
@@ -649,12 +665,6 @@
   src/x32-zip/xm-scalar.c
   src/xx-copy/memcpy.c)
 
-SET(XNNPACK_PSIMD_ACCMATH_MICROKERNEL_SRCS
-  src/qs8-requantization/fp32-psimd.c
-  src/qs8-requantization/precise-psimd.c
-  src/qu8-requantization/fp32-psimd.c
-  src/qu8-requantization/precise-psimd.c)
-
 SET(XNNPACK_NEON_MICROKERNEL_SRCS
   src/f32-argmaxpool/4x-neon-c4.c
   src/f32-argmaxpool/9p8x-neon-c4.c
@@ -671,6 +681,7 @@
   src/f32-conv-hwc/gen/3x3s2p1c3x4-neon-2x2.c
   src/f32-conv-hwc/gen/3x3s2p1c3x8-neon-2x1.c
   src/f32-conv-hwc/gen/3x3s2p1c3x8-neon-2x2.c
+  src/f32-conv-hwc2chw/3x3s2p1c3x4-neon-2x2.c
   src/f32-dwconv/gen/up4x4-minmax-neon-acc2.c
   src/f32-dwconv/gen/up4x4-minmax-neon.c
   src/f32-dwconv/gen/up4x9-minmax-neon-acc2.c
@@ -820,7 +831,6 @@
   src/f32-relu/gen/neon-x4.c
   src/f32-relu/gen/neon-x8.c
   src/f32-rmax/neon.c
-  src/f32-sigmoid/gen/neon-frac-p9-p10-nr1recps-x16.c
   src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x4.c
   src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x8.c
   src/f32-sigmoid/gen/neon-rr2-lut64-p2-nr2recps-x12.c
@@ -839,6 +849,19 @@
   src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x16.c
   src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x20.c
   src/f32-sigmoid/gen/neon-rr2-p5-nr2recps-x24.c
+  src/f32-spmm/gen/4x1-minmax-neon-pipelined.c
+  src/f32-spmm/gen/4x1-minmax-neon-x2.c
+  src/f32-spmm/gen/4x1-minmax-neon.c
+  src/f32-spmm/gen/8x1-minmax-neon-pipelined.c
+  src/f32-spmm/gen/8x1-minmax-neon-x2.c
+  src/f32-spmm/gen/8x1-minmax-neon.c
+  src/f32-spmm/gen/12x1-minmax-neon.c
+  src/f32-spmm/gen/16x1-minmax-neon-pipelined.c
+  src/f32-spmm/gen/16x1-minmax-neon-x2.c
+  src/f32-spmm/gen/16x1-minmax-neon.c
+  src/f32-spmm/gen/32x1-minmax-neon-pipelined.c
+  src/f32-spmm/gen/32x1-minmax-neon-x2.c
+  src/f32-spmm/gen/32x1-minmax-neon.c
   src/f32-vbinary/gen/vadd-minmax-neon-x4.c
   src/f32-vbinary/gen/vadd-minmax-neon-x8.c
   src/f32-vbinary/gen/vaddc-minmax-neon-x4.c
@@ -865,6 +888,18 @@
   src/f32-vbinary/gen/vsub-minmax-neon-x8.c
   src/f32-vbinary/gen/vsubc-minmax-neon-x4.c
   src/f32-vbinary/gen/vsubc-minmax-neon-x8.c
+  src/f32-velu/gen/velu-neon-rr2-lut16-p3-x4.c
+  src/f32-velu/gen/velu-neon-rr2-lut16-p3-x8.c
+  src/f32-velu/gen/velu-neon-rr2-lut16-p3-x12.c
+  src/f32-velu/gen/velu-neon-rr2-lut16-p3-x16.c
+  src/f32-velu/gen/velu-neon-rr2-lut16-p3-x20.c
+  src/f32-velu/gen/velu-neon-rr2-lut16-p3-x24.c
+  src/f32-velu/gen/velu-neon-rr2-p6-x4.c
+  src/f32-velu/gen/velu-neon-rr2-p6-x8.c
+  src/f32-velu/gen/velu-neon-rr2-p6-x12.c
+  src/f32-velu/gen/velu-neon-rr2-p6-x16.c
+  src/f32-velu/gen/velu-neon-rr2-p6-x20.c
+  src/f32-velu/gen/velu-neon-rr2-p6-x24.c
   src/f32-vlrelu/gen/vlrelu-neon-x4.c
   src/f32-vlrelu/gen/vlrelu-neon-x8.c
   src/f32-vmulcaddc/gen/c4-minmax-neon-2x.c
@@ -892,10 +927,6 @@
   src/math/roundu-neon-cvt.c
   src/math/roundz-neon-addsub.c
   src/math/roundz-neon-cvt.c
-  src/math/sigmoid-neon-frac-p9-p10-nr1recps.c
-  src/math/sigmoid-neon-rr1-lut64-p2-nr2recps.c
-  src/math/sigmoid-neon-rr1-lut2048-p1-nr2recps.c
-  src/math/sigmoid-neon-rr1-p5-nr2recps.c
   src/math/sigmoid-neon-rr2-lut64-p2-nr2recps.c
   src/math/sigmoid-neon-rr2-lut2048-p1-nr2recps.c
   src/math/sigmoid-neon-rr2-p5-nr2recps.c
@@ -1084,6 +1115,31 @@
   src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x16.c
   src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x20.c
   src/f32-sigmoid/gen/neonfma-rr1-p5-nr2recps-x24.c
+  src/f32-spmm/gen/4x1-minmax-neonfma-pipelined.c
+  src/f32-spmm/gen/4x1-minmax-neonfma-x2.c
+  src/f32-spmm/gen/4x1-minmax-neonfma.c
+  src/f32-spmm/gen/8x1-minmax-neonfma-pipelined.c
+  src/f32-spmm/gen/8x1-minmax-neonfma-x2.c
+  src/f32-spmm/gen/8x1-minmax-neonfma.c
+  src/f32-spmm/gen/12x1-minmax-neonfma.c
+  src/f32-spmm/gen/16x1-minmax-neonfma-pipelined.c
+  src/f32-spmm/gen/16x1-minmax-neonfma-x2.c
+  src/f32-spmm/gen/16x1-minmax-neonfma.c
+  src/f32-spmm/gen/32x1-minmax-neonfma-pipelined.c
+  src/f32-spmm/gen/32x1-minmax-neonfma-x2.c
+  src/f32-spmm/gen/32x1-minmax-neonfma.c
+  src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x4.c
+  src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x8.c
+  src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x12.c
+  src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x16.c
+  src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x20.c
+  src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x24.c
+  src/f32-velu/gen/velu-neonfma-rr1-p6-x4.c
+  src/f32-velu/gen/velu-neonfma-rr1-p6-x8.c
+  src/f32-velu/gen/velu-neonfma-rr1-p6-x12.c
+  src/f32-velu/gen/velu-neonfma-rr1-p6-x16.c
+  src/f32-velu/gen/velu-neonfma-rr1-p6-x20.c
+  src/f32-velu/gen/velu-neonfma-rr1-p6-x24.c
   src/f32-vmulcaddc/gen/c4-minmax-neonfma-2x.c
   src/f32-vmulcaddc/gen/c8-minmax-neonfma-2x.c
   src/f32-vsqrt/gen/neonfma-nr1rsqrts1fma1adj-x4.c
@@ -1108,11 +1164,11 @@
   src/f32-vsqrt/gen/neonfma-nr2fma1adj-x40.c
   src/math/exp-neonfma-rr2-lut64-p2.c
   src/math/exp-neonfma-rr2-p5.c
+  src/math/expm1minus-neonfma-rr1-lut16-p3.c
+  src/math/expm1minus-neonfma-rr1-p6.c
   src/math/expminus-neonfma-rr2-lut64-p2.c
   src/math/expminus-neonfma-rr2-lut2048-p1.c
   src/math/expminus-neonfma-rr2-p5.c
-  src/math/expm1minus-neonfma-rr1-lut16-p3.c
-  src/math/expm1minus-neonfma-rr1-p6.c
   src/math/sigmoid-neonfma-rr1-lut64-p2-nr1recps1fma.c
   src/math/sigmoid-neonfma-rr1-lut64-p2-nr2fma.c
   src/math/sigmoid-neonfma-rr1-lut64-p2-nr2recps.c
@@ -1240,27 +1296,14 @@
   src/f32-sigmoid/gen/neonfma-rr1-p5-div-x16.c
   src/f32-sigmoid/gen/neonfma-rr1-p5-div-x20.c
   src/f32-sigmoid/gen/neonfma-rr1-p5-div-x24.c
-  src/f32-spmm/gen/4x1-minmax-neonfma-pipelined.c
-  src/f32-spmm/gen/4x1-minmax-neonfma-x2.c
-  src/f32-spmm/gen/4x1-minmax-neonfma.c
   src/f32-spmm/gen/4x2-minmax-neonfma.c
   src/f32-spmm/gen/4x4-minmax-neonfma.c
-  src/f32-spmm/gen/8x1-minmax-neonfma-pipelined.c
-  src/f32-spmm/gen/8x1-minmax-neonfma-x2.c
-  src/f32-spmm/gen/8x1-minmax-neonfma.c
   src/f32-spmm/gen/8x2-minmax-neonfma.c
   src/f32-spmm/gen/8x4-minmax-neonfma.c
-  src/f32-spmm/gen/12x1-minmax-neonfma.c
   src/f32-spmm/gen/12x2-minmax-neonfma.c
   src/f32-spmm/gen/12x4-minmax-neonfma.c
-  src/f32-spmm/gen/16x1-minmax-neonfma-pipelined.c
-  src/f32-spmm/gen/16x1-minmax-neonfma-x2.c
-  src/f32-spmm/gen/16x1-minmax-neonfma.c
   src/f32-spmm/gen/16x2-minmax-neonfma.c
   src/f32-spmm/gen/16x4-minmax-neonfma.c
-  src/f32-spmm/gen/32x1-minmax-neonfma-pipelined.c
-  src/f32-spmm/gen/32x1-minmax-neonfma-x2.c
-  src/f32-spmm/gen/32x1-minmax-neonfma.c
   src/f32-spmm/gen/32x2-minmax-neonfma.c
   src/f32-spmm/gen/32x4-minmax-neonfma.c
   src/f32-vbinary/gen/vdiv-minmax-neon-x4.c
@@ -1450,16 +1493,28 @@
   src/f32-gemm/gen-inc/1x8inc-minmax-sse-dup.c
   src/f32-gemm/gen-inc/1x8inc-minmax-sse-load1.c
   src/f32-gemm/gen-inc/1x8s4inc-minmax-sse.c
+  src/f32-gemm/gen-inc/3x8inc-minmax-sse-dup.c
+  src/f32-gemm/gen-inc/3x8inc-minmax-sse-load1.c
+  src/f32-gemm/gen-inc/3x8s4inc-minmax-sse.c
   src/f32-gemm/gen-inc/4x8inc-minmax-sse-dup.c
   src/f32-gemm/gen-inc/4x8inc-minmax-sse-load1.c
   src/f32-gemm/gen-inc/4x8s4inc-minmax-sse.c
+  src/f32-gemm/gen-inc/5x8inc-minmax-sse-dup.c
+  src/f32-gemm/gen-inc/5x8inc-minmax-sse-load1.c
+  src/f32-gemm/gen-inc/5x8s4inc-minmax-sse.c
   src/f32-gemm/gen/1x8-minmax-sse-dup.c
   src/f32-gemm/gen/1x8-minmax-sse-load1.c
   src/f32-gemm/gen/1x8s4-minmax-sse.c
+  src/f32-gemm/gen/3x8-minmax-sse-dup.c
+  src/f32-gemm/gen/3x8-minmax-sse-load1.c
+  src/f32-gemm/gen/3x8s4-minmax-sse.c
   src/f32-gemm/gen/4x2c4-minmax-sse.c
   src/f32-gemm/gen/4x8-minmax-sse-dup.c
   src/f32-gemm/gen/4x8-minmax-sse-load1.c
   src/f32-gemm/gen/4x8s4-minmax-sse.c
+  src/f32-gemm/gen/5x8-minmax-sse-dup.c
+  src/f32-gemm/gen/5x8-minmax-sse-load1.c
+  src/f32-gemm/gen/5x8s4-minmax-sse.c
   src/f32-hswish/gen/hswish-sse-x4.c
   src/f32-hswish/gen/hswish-sse-x8.c
   src/f32-ibilinear/gen/sse-c4.c
@@ -1467,10 +1522,16 @@
   src/f32-igemm/gen/1x8-minmax-sse-dup.c
   src/f32-igemm/gen/1x8-minmax-sse-load1.c
   src/f32-igemm/gen/1x8s4-minmax-sse.c
+  src/f32-igemm/gen/3x8-minmax-sse-dup.c
+  src/f32-igemm/gen/3x8-minmax-sse-load1.c
+  src/f32-igemm/gen/3x8s4-minmax-sse.c
   src/f32-igemm/gen/4x2c4-minmax-sse.c
   src/f32-igemm/gen/4x8-minmax-sse-dup.c
   src/f32-igemm/gen/4x8-minmax-sse-load1.c
   src/f32-igemm/gen/4x8s4-minmax-sse.c
+  src/f32-igemm/gen/5x8-minmax-sse-dup.c
+  src/f32-igemm/gen/5x8-minmax-sse-load1.c
+  src/f32-igemm/gen/5x8s4-minmax-sse.c
   src/f32-maxpool/9p8x-minmax-sse-c4.c
   src/f32-pavgpool/9p8x-minmax-sse-c4.c
   src/f32-pavgpool/9x-minmax-sse-c4.c
@@ -1543,6 +1604,18 @@
   src/f32-argmaxpool/4x-sse2-c4.c
   src/f32-argmaxpool/9p8x-sse2-c4.c
   src/f32-argmaxpool/9x-sse2-c4.c
+  src/f32-gemm/gen-inc/1x8inc-minmax-sse2-dup.c
+  src/f32-gemm/gen-inc/3x8inc-minmax-sse2-dup.c
+  src/f32-gemm/gen-inc/4x8inc-minmax-sse2-dup.c
+  src/f32-gemm/gen-inc/5x8inc-minmax-sse2-dup.c
+  src/f32-gemm/gen/1x8-minmax-sse2-dup.c
+  src/f32-gemm/gen/3x8-minmax-sse2-dup.c
+  src/f32-gemm/gen/4x8-minmax-sse2-dup.c
+  src/f32-gemm/gen/5x8-minmax-sse2-dup.c
+  src/f32-igemm/gen/1x8-minmax-sse2-dup.c
+  src/f32-igemm/gen/3x8-minmax-sse2-dup.c
+  src/f32-igemm/gen/4x8-minmax-sse2-dup.c
+  src/f32-igemm/gen/5x8-minmax-sse2-dup.c
   src/f32-prelu/gen/sse2-2x4.c
   src/f32-prelu/gen/sse2-2x8.c
   src/f32-raddstoreexpminusmax/gen/sse2-p5-x4.c
@@ -1569,6 +1642,18 @@
   src/f32-sigmoid/gen/sse2-p5-div-x16.c
   src/f32-sigmoid/gen/sse2-p5-div-x20.c
   src/f32-sigmoid/gen/sse2-p5-div-x24.c
+  src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x4.c
+  src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x8.c
+  src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x12.c
+  src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x16.c
+  src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x20.c
+  src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x24.c
+  src/f32-velu/gen/velu-sse2-rr2-p6-x4.c
+  src/f32-velu/gen/velu-sse2-rr2-p6-x8.c
+  src/f32-velu/gen/velu-sse2-rr2-p6-x12.c
+  src/f32-velu/gen/velu-sse2-rr2-p6-x16.c
+  src/f32-velu/gen/velu-sse2-rr2-p6-x20.c
+  src/f32-velu/gen/velu-sse2-rr2-p6-x24.c
   src/f32-vlrelu/gen/vlrelu-sse2-x4.c
   src/f32-vlrelu/gen/vlrelu-sse2-x8.c
   src/f32-vrnd/gen/vrndd-sse2-x4.c
@@ -1729,6 +1814,18 @@
   src/f32-sigmoid/gen/sse41-p5-div-x16.c
   src/f32-sigmoid/gen/sse41-p5-div-x20.c
   src/f32-sigmoid/gen/sse41-p5-div-x24.c
+  src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c
+  src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x8.c
+  src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x12.c
+  src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x16.c
+  src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x20.c
+  src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x24.c
+  src/f32-velu/gen/velu-sse41-rr2-p6-x4.c
+  src/f32-velu/gen/velu-sse41-rr2-p6-x8.c
+  src/f32-velu/gen/velu-sse41-rr2-p6-x12.c
+  src/f32-velu/gen/velu-sse41-rr2-p6-x16.c
+  src/f32-velu/gen/velu-sse41-rr2-p6-x20.c
+  src/f32-velu/gen/velu-sse41-rr2-p6-x24.c
   src/f32-vlrelu/gen/vlrelu-sse41-x4.c
   src/f32-vlrelu/gen/vlrelu-sse41-x8.c
   src/f32-vrnd/gen/vrndd-sse41-x4.c
@@ -1898,6 +1995,24 @@
   src/f32-vbinary/gen/vsub-minmax-avx-x16.c
   src/f32-vbinary/gen/vsubc-minmax-avx-x8.c
   src/f32-vbinary/gen/vsubc-minmax-avx-x16.c
+  src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x8.c
+  src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x16.c
+  src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x24.c
+  src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x32.c
+  src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x40.c
+  src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x48.c
+  src/f32-velu/gen/velu-avx-rr2-lut16-p3-x8.c
+  src/f32-velu/gen/velu-avx-rr2-lut16-p3-x16.c
+  src/f32-velu/gen/velu-avx-rr2-lut16-p3-x24.c
+  src/f32-velu/gen/velu-avx-rr2-lut16-p3-x32.c
+  src/f32-velu/gen/velu-avx-rr2-lut16-p3-x40.c
+  src/f32-velu/gen/velu-avx-rr2-lut16-p3-x48.c
+  src/f32-velu/gen/velu-avx-rr2-p6-x8.c
+  src/f32-velu/gen/velu-avx-rr2-p6-x16.c
+  src/f32-velu/gen/velu-avx-rr2-p6-x24.c
+  src/f32-velu/gen/velu-avx-rr2-p6-x32.c
+  src/f32-velu/gen/velu-avx-rr2-p6-x40.c
+  src/f32-velu/gen/velu-avx-rr2-p6-x48.c
   src/f32-vlrelu/gen/vlrelu-avx-x8.c
   src/f32-vlrelu/gen/vlrelu-avx-x16.c
   src/f32-vrnd/gen/vrndd-avx-x8.c
@@ -2099,6 +2214,46 @@
   src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x64.c
   src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x72.c
   src/f32-sigmoid/gen/avx2-rr1-p5-nr2fma-x80.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x8.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x16.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x24.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x32.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x40.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x48.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x56.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x64.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x72.c
+  src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x80.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x8.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x16.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x24.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x32.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x40.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x48.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x56.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x64.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x72.c
+  src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x80.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x8.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x16.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x24.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x32.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x40.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x48.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x56.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x64.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x72.c
+  src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x80.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x8.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x16.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x24.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x32.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x40.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x48.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x56.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x64.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x72.c
+  src/f32-velu/gen/velu-avx2-rr1-p6-x80.c
   src/f32-vscaleexpminusmax/gen/avx2-p5-x8.c
   src/f32-vscaleexpminusmax/gen/avx2-p5-x16.c
   src/f32-vscaleexpminusmax/gen/avx2-p5-x24.c
@@ -2126,11 +2281,11 @@
   src/math/exp-avx2-rr2-lut8-p3-perm.c
   src/math/exp-avx2-rr2-lut8-p4-perm.c
   src/math/exp-avx2-rr2-p5.c
-  src/math/expminus-avx2-rr2-p5.c
   src/math/expm1minus-avx2-rr1-lut4-p4-perm.c
   src/math/expm1minus-avx2-rr1-lut8-p4-perm.c
   src/math/expm1minus-avx2-rr1-lut16-p3-gather.c
   src/math/expm1minus-avx2-rr1-p6.c
+  src/math/expminus-avx2-rr2-p5.c
   src/math/extexp-avx2-p5.c
   src/math/sigmoid-avx2-rr1-lut64-p2-gather-div.c
   src/math/sigmoid-avx2-rr1-lut64-p2-gather-nr1fma.c
@@ -2326,6 +2481,22 @@
   src/f32-vbinary/gen/vsub-minmax-avx512f-x32.c
   src/f32-vbinary/gen/vsubc-minmax-avx512f-x16.c
   src/f32-vbinary/gen/vsubc-minmax-avx512f-x32.c
+  src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x16.c
+  src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x32.c
+  src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x48.c
+  src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x64.c
+  src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x80.c
+  src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x96.c
+  src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x112.c
+  src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x128.c
+  src/f32-velu/gen/velu-avx512f-rr1-p6-x16.c
+  src/f32-velu/gen/velu-avx512f-rr1-p6-x32.c
+  src/f32-velu/gen/velu-avx512f-rr1-p6-x48.c
+  src/f32-velu/gen/velu-avx512f-rr1-p6-x64.c
+  src/f32-velu/gen/velu-avx512f-rr1-p6-x80.c
+  src/f32-velu/gen/velu-avx512f-rr1-p6-x96.c
+  src/f32-velu/gen/velu-avx512f-rr1-p6-x112.c
+  src/f32-velu/gen/velu-avx512f-rr1-p6-x128.c
   src/f32-vlrelu/gen/vlrelu-avx512f-x16.c
   src/f32-vlrelu/gen/vlrelu-avx512f-x32.c
   src/f32-vrnd/gen/vrndd-avx512f-x16.c
@@ -2520,26 +2691,27 @@
   src/qs8-gemm/4x16c4-aarch64-neondot-ld64.S)
 
 SET(XNNPACK_MICROKERNEL_SRCS ${XNNPACK_SCALAR_MICROKERNEL_SRCS})
-IF(NOT EMSCRIPTEN AND NOT MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Intel")
-  LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_PSIMD_ACCMATH_MICROKERNEL_SRCS})
-ENDIF()
 IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv[5-8]" OR IOS_ARCH MATCHES "^armv7")
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_NEON_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_NEONFMA_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_NEONV8_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_NEONDOT_MICROKERNEL_SRCS})
-  LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_AARCH32_ASM_MICROKERNEL_SRCS})
+  IF(XNNPACK_ENABLE_ASSEMBLY)
+    LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_AARCH32_ASM_MICROKERNEL_SRCS})
+  ENDIF()
 ENDIF()
-IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)$" OR IOS_ARCH MATCHES "^arm64.*")
+IF(XNNPACK_TARGET_PROCESSOR MATCHES "^(aarch64|arm64)$" OR IOS_ARCH MATCHES "^arm64.*")
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_NEON_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_NEONFMA_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_NEONV8_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_AARCH64_NEONFMA_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_AARCH64_NEONFP16ARITH_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_NEONDOT_MICROKERNEL_SRCS})
-  LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_AARCH64_ASM_MICROKERNEL_SRCS})
+  IF(XNNPACK_ENABLE_ASSEMBLY)
+    LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_AARCH64_ASM_MICROKERNEL_SRCS})
+  ENDIF()
 ENDIF()
-IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^(i[3-6]86|x86_64|AMD64)$" OR IOS_ARCH MATCHES "^(i386|x86_64|AMD64)$")
+IF(XNNPACK_TARGET_PROCESSOR MATCHES "^(i[3-6]86|x86_64|AMD64)$" OR IOS_ARCH MATCHES "^(i386|x86_64|AMD64)$")
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_SSE_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_SSE2_MICROKERNEL_SRCS})
   LIST(APPEND XNNPACK_MICROKERNEL_SRCS ${XNNPACK_SSSE3_MICROKERNEL_SRCS})
@@ -2566,7 +2738,6 @@
   C_EXTENSIONS YES)
 IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv[5-8]" OR IOS_ARCH MATCHES "^armv7")
   SET_PROPERTY(SOURCE ${XNNPACK_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -marm ")
-  SET_PROPERTY(SOURCE ${XNNPACK_PSIMD_ACCMATH_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -march=armv7-a -mfpu=neon ")
   SET_PROPERTY(SOURCE ${XNNPACK_NEON_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -march=armv7-a -mfpu=neon ")
   SET_PROPERTY(SOURCE ${XNNPACK_NEONFMA_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -march=armv7-a -mfpu=neon-vfpv4 ")
   SET_PROPERTY(SOURCE ${XNNPACK_NEONV8_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -march=armv8-a -mfpu=neon-fp-armv8 ")
@@ -2583,15 +2754,18 @@
     SET_PROPERTY(SOURCE ${XNNPACK_NEONDOT_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -mfloat-abi=softfp ")
   ENDIF()
 ENDIF()
-IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)$" OR IOS_ARCH MATCHES "^arm64.*")
+IF(XNNPACK_TARGET_PROCESSOR MATCHES "^(aarch64|arm64)$" OR IOS_ARCH MATCHES "^arm64.*")
   SET_PROPERTY(SOURCE ${XNNPACK_AARCH64_NEONFP16ARITH_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -march=armv8.2-a+fp16 ")
   SET_PROPERTY(SOURCE ${XNNPACK_NEONDOT_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -march=armv8.2-a+dotprod ")
   SET_PROPERTY(SOURCE ${XNNPACK_AARCH64_ASM_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -march=armv8.2-a+fp16+dotprod ")
   IF(IOS)
     SET_PROPERTY(SOURCE ${XNNPACK_AARCH64_ASM_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -arch ${IOS_ARCH} ")
   ENDIF()
+  IF(XNNPACK_TARGET_PROCESSOR STREQUAL "arm64" AND CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "x86_64")
+    SET_PROPERTY(SOURCE ${XNNPACK_AARCH64_ASM_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -arch arm64 ")
+  ENDIF()
 ENDIF()
-IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^(i[3-6]86|x86|x86_64|AMD64)$" OR IOS_ARCH MATCHES "^(i386|x86_64|AMD64)$")
+IF(XNNPACK_TARGET_PROCESSOR MATCHES "^(i[3-6]86|x86|x86_64|AMD64)$" OR IOS_ARCH MATCHES "^(i386|x86_64|AMD64)$")
   IF(MSVC)
     IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86" OR CMAKE_SIZEOF_VOID_P EQUAL 4)
       SET_PROPERTY(SOURCE ${XNNPACK_SSE_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " /arch:SSE ")
@@ -2606,7 +2780,6 @@
     SET_PROPERTY(SOURCE ${XNNPACK_AVX512F_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " /arch:AVX512 ")
     SET_PROPERTY(SOURCE ${XNNPACK_AVX512SKX_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " /arch:AVX512 ")
   ELSE()
-    SET_PROPERTY(SOURCE ${XNNPACK_PSIMD_ACCMATH_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -msse2 ")
     SET_PROPERTY(SOURCE ${XNNPACK_SSE_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -msse ")
     SET_PROPERTY(SOURCE ${XNNPACK_SSE2_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -msse2 ")
     SET_PROPERTY(SOURCE ${XNNPACK_SSSE3_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -mssse3 ")
@@ -2652,15 +2825,6 @@
     SET_PROPERTY(SOURCE ${XNNPACK_COLD_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS "$<$<NOT:$<CONFIG:Debug>>: -Os >")
   ENDIF()
 ENDIF()
-IF(NOT EMSCRIPTEN)
-  IF(${CMAKE_VERSION} VERSION_LESS "3.8.0")
-    IF(NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
-      SET_PROPERTY(SOURCE ${XNNPACK_PSIMD_ACCMATH_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS " -O3 ")
-    ENDIF()
-  ELSE()
-    SET_PROPERTY(SOURCE ${XNNPACK_PSIMD_ACCMATH_MICROKERNEL_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS "$<$<NOT:$<CONFIG:Debug>>: -O3 >")
-  ENDIF()
-ENDIF()
 
 TARGET_INCLUDE_DIRECTORIES(XNNPACK PUBLIC include)
 TARGET_INCLUDE_DIRECTORIES(XNNPACK PRIVATE src)
@@ -2757,23 +2921,6 @@
 ENDIF()
 TARGET_LINK_LIBRARIES(XNNPACK PRIVATE fxdiv)
 
-# ---[ Configure psimd
-IF(NOT TARGET psimd)
-  IF(NOT XNNPACK_USE_SYSTEM_LIBS)
-    ADD_SUBDIRECTORY(
-      "${PSIMD_SOURCE_DIR}"
-      "${CMAKE_BINARY_DIR}/psimd")
-  ELSE()
-    FIND_FILE(PSIMD_HDR psimd.h PATH_SUFFIXES include)
-    IF(NOT PSIMD_HDR)
-      MESSAGE(FATAL_ERROR "Cannot find psimd")
-    ENDIF()
-    ADD_LIBRARY(psimd STATIC "${PSIMD_HDR}")
-    SET_PROPERTY(TARGET psimd PROPERTY LINKER_LANGUAGE C)
-  ENDIF()
-ENDIF()
-TARGET_LINK_LIBRARIES(XNNPACK PRIVATE psimd)
-
 # ---[ Configure FP16
 IF(NOT TARGET fp16)
   IF(NOT XNNPACK_USE_SYSTEM_LIBS)
@@ -2967,6 +3114,15 @@
   TARGET_LINK_LIBRARIES(divide-nd-test PRIVATE XNNPACK fp16 gtest gtest_main)
   ADD_TEST(divide-nd-test divide-nd-test)
 
+  ADD_EXECUTABLE(elu-nc-test test/elu-nc.cc)
+  SET_TARGET_PROPERTIES(elu-nc-test PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(elu-nc-test PRIVATE src test)
+  TARGET_LINK_LIBRARIES(elu-nc-test PRIVATE XNNPACK gtest gtest_main)
+  ADD_TEST(elu-nc-test elu-nc-test)
+
   ADD_EXECUTABLE(fully-connected-nc-test test/fully-connected-nc.cc)
   SET_TARGET_PROPERTIES(fully-connected-nc-test PROPERTIES
     CXX_STANDARD 11
@@ -3809,6 +3965,15 @@
   TARGET_LINK_LIBRARIES(f32-vrdivc-relu-test PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
   ADD_TEST(f32-vrdivc-relu-test f32-vrdivc-relu-test)
 
+  ADD_EXECUTABLE(f32-velu-test test/f32-velu.cc)
+  SET_TARGET_PROPERTIES(f32-velu-test PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS YES)
+  TARGET_INCLUDE_DIRECTORIES(f32-velu-test PRIVATE src test)
+  TARGET_LINK_LIBRARIES(f32-velu-test PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
+  ADD_TEST(f32-velu-test f32-velu-test)
+
   ADD_EXECUTABLE(f32-vlrelu-test test/f32-vlrelu.cc)
   SET_TARGET_PROPERTIES(f32-vlrelu-test PROPERTIES
     CXX_STANDARD 11
@@ -4351,21 +4516,62 @@
   TARGET_LINK_LIBRARIES(bench-utils PRIVATE benchmark cpuinfo)
 
   # ---[ Build accuracy microbenchmarks
+  ADD_EXECUTABLE(f32-exp-ulp-eval eval/f32-exp-ulp.cc)
+  SET_TARGET_PROPERTIES(f32-exp-ulp-eval PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(f32-exp-ulp-eval PRIVATE . src)
+  TARGET_LINK_LIBRARIES(f32-exp-ulp-eval PRIVATE XNNPACK benchmark bench-utils cpuinfo fp16 pthreadpool)
+
+  ADD_EXECUTABLE(f32-expminus-ulp-eval eval/f32-expminus-ulp.cc)
+  SET_TARGET_PROPERTIES(f32-expminus-ulp-eval PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(f32-expminus-ulp-eval PRIVATE . src)
+  TARGET_LINK_LIBRARIES(f32-expminus-ulp-eval PRIVATE XNNPACK benchmark bench-utils cpuinfo fp16 pthreadpool)
+
+  ADD_EXECUTABLE(f32-expm1minus-ulp-eval eval/f32-expm1minus-ulp.cc)
+  SET_TARGET_PROPERTIES(f32-expm1minus-ulp-eval PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(f32-expm1minus-ulp-eval PRIVATE . src)
+  TARGET_LINK_LIBRARIES(f32-expm1minus-ulp-eval PRIVATE XNNPACK benchmark bench-utils cpuinfo fp16 pthreadpool)
+
+  ADD_EXECUTABLE(f32-extexp-ulp-eval eval/f32-extexp-ulp.cc)
+  SET_TARGET_PROPERTIES(f32-extexp-ulp-eval PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS YES)
+  TARGET_INCLUDE_DIRECTORIES(f32-extexp-ulp-eval PRIVATE . src)
+  TARGET_LINK_LIBRARIES(f32-extexp-ulp-eval PRIVATE XNNPACK benchmark bench-utils cpuinfo fp16 pthreadpool)
+
+  ADD_EXECUTABLE(f32-sigmoid-ulp-eval eval/f32-sigmoid-ulp.cc)
+  SET_TARGET_PROPERTIES(f32-sigmoid-ulp-eval PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(f32-sigmoid-ulp-eval PRIVATE . src)
+  TARGET_LINK_LIBRARIES(f32-sigmoid-ulp-eval PRIVATE XNNPACK benchmark bench-utils cpuinfo fp16 pthreadpool)
+
+  ADD_EXECUTABLE(f32-sqrt-ulp-eval eval/f32-sqrt-ulp.cc)
+  SET_TARGET_PROPERTIES(f32-sqrt-ulp-eval PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(f32-sqrt-ulp-eval PRIVATE . src)
+  TARGET_LINK_LIBRARIES(f32-sqrt-ulp-eval PRIVATE XNNPACK benchmark bench-utils cpuinfo fp16 pthreadpool)
+
+  # ---[ Build accuracy tests
   ADD_EXECUTABLE(f32-exp-eval eval/f32-exp.cc)
   SET_TARGET_PROPERTIES(f32-exp-eval PROPERTIES
     CXX_STANDARD 11
     CXX_STANDARD_REQUIRED YES
     CXX_EXTENSIONS NO)
   TARGET_INCLUDE_DIRECTORIES(f32-exp-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-exp-eval PRIVATE XNNPACK benchmark fp16)
-
-  ADD_EXECUTABLE(f32-expminus-eval eval/f32-expminus.cc)
-  SET_TARGET_PROPERTIES(f32-expminus-eval PROPERTIES
-    CXX_STANDARD 11
-    CXX_STANDARD_REQUIRED YES
-    CXX_EXTENSIONS NO)
-  TARGET_INCLUDE_DIRECTORIES(f32-expminus-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-expminus-eval PRIVATE XNNPACK benchmark fp16)
+  TARGET_LINK_LIBRARIES(f32-exp-eval PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
 
   ADD_EXECUTABLE(f32-expm1minus-eval eval/f32-expm1minus.cc)
   SET_TARGET_PROPERTIES(f32-expm1minus-eval PROPERTIES
@@ -4373,15 +4579,15 @@
     CXX_STANDARD_REQUIRED YES
     CXX_EXTENSIONS NO)
   TARGET_INCLUDE_DIRECTORIES(f32-expm1minus-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-expm1minus-eval PRIVATE XNNPACK benchmark bench-utils fp16)
+  TARGET_LINK_LIBRARIES(f32-expm1minus-eval PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
 
-  ADD_EXECUTABLE(f32-extexp-eval eval/f32-extexp.cc)
-  SET_TARGET_PROPERTIES(f32-extexp-eval PROPERTIES
+  ADD_EXECUTABLE(f32-expminus-eval eval/f32-expminus.cc)
+  SET_TARGET_PROPERTIES(f32-expminus-eval PROPERTIES
     CXX_STANDARD 11
     CXX_STANDARD_REQUIRED YES
-    CXX_EXTENSIONS YES)
-  TARGET_INCLUDE_DIRECTORIES(f32-extexp-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-extexp-eval PRIVATE XNNPACK benchmark fp16)
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(f32-expminus-eval PRIVATE src)
+  TARGET_LINK_LIBRARIES(f32-expminus-eval PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
 
   ADD_EXECUTABLE(f32-roundne-eval eval/f32-roundne.cc)
   SET_TARGET_PROPERTIES(f32-roundne-eval PROPERTIES
@@ -4389,7 +4595,7 @@
     CXX_STANDARD_REQUIRED YES
     CXX_EXTENSIONS NO)
   TARGET_INCLUDE_DIRECTORIES(f32-roundne-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-roundne-eval PRIVATE XNNPACK fp16 gtest gtest_main)
+  TARGET_LINK_LIBRARIES(f32-roundne-eval PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
 
   ADD_EXECUTABLE(f32-roundd-eval eval/f32-roundd.cc)
   SET_TARGET_PROPERTIES(f32-roundd-eval PROPERTIES
@@ -4397,7 +4603,7 @@
     CXX_STANDARD_REQUIRED YES
     CXX_EXTENSIONS NO)
   TARGET_INCLUDE_DIRECTORIES(f32-roundd-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-roundd-eval PRIVATE XNNPACK fp16 gtest gtest_main)
+  TARGET_LINK_LIBRARIES(f32-roundd-eval PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
 
   ADD_EXECUTABLE(f32-roundu-eval eval/f32-roundu.cc)
   SET_TARGET_PROPERTIES(f32-roundu-eval PROPERTIES
@@ -4405,7 +4611,7 @@
     CXX_STANDARD_REQUIRED YES
     CXX_EXTENSIONS NO)
   TARGET_INCLUDE_DIRECTORIES(f32-roundu-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-roundu-eval PRIVATE XNNPACK fp16 gtest gtest_main)
+  TARGET_LINK_LIBRARIES(f32-roundu-eval PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
 
   ADD_EXECUTABLE(f32-roundz-eval eval/f32-roundz.cc)
   SET_TARGET_PROPERTIES(f32-roundz-eval PROPERTIES
@@ -4413,23 +4619,7 @@
     CXX_STANDARD_REQUIRED YES
     CXX_EXTENSIONS NO)
   TARGET_INCLUDE_DIRECTORIES(f32-roundz-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-roundz-eval PRIVATE XNNPACK fp16 gtest gtest_main)
-
-  ADD_EXECUTABLE(f32-sigmoid-eval eval/f32-sigmoid.cc)
-  SET_TARGET_PROPERTIES(f32-sigmoid-eval PROPERTIES
-    CXX_STANDARD 11
-    CXX_STANDARD_REQUIRED YES
-    CXX_EXTENSIONS NO)
-  TARGET_INCLUDE_DIRECTORIES(f32-sigmoid-eval PRIVATE src)
-  TARGET_LINK_LIBRARIES(f32-sigmoid-eval PRIVATE XNNPACK fp16 benchmark)
-
-  ADD_EXECUTABLE(f32-sqrt-eval eval/f32-sqrt.cc)
-  SET_TARGET_PROPERTIES(f32-sqrt-eval PROPERTIES
-    CXX_STANDARD 11
-    CXX_STANDARD_REQUIRED YES
-    CXX_EXTENSIONS NO)
-  TARGET_INCLUDE_DIRECTORIES(f32-sqrt-eval PRIVATE . src)
-  TARGET_LINK_LIBRARIES(f32-sqrt-eval PRIVATE XNNPACK fp16 benchmark bench-utils)
+  TARGET_LINK_LIBRARIES(f32-roundz-eval PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
 
   # ---[ Build end-to-end microbenchmarks
   ADD_LIBRARY(bench-models STATIC
@@ -4523,6 +4713,14 @@
   TARGET_INCLUDE_DIRECTORIES(deconvolution-bench PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
   TARGET_LINK_LIBRARIES(deconvolution-bench PRIVATE XNNPACK benchmark bench-utils)
 
+  ADD_EXECUTABLE(elu-bench bench/elu.cc)
+  SET_TARGET_PROPERTIES(elu-bench PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(elu-bench PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
+  TARGET_LINK_LIBRARIES(elu-bench PRIVATE XNNPACK benchmark bench-utils)
+
   ADD_EXECUTABLE(floor-bench bench/floor.cc)
   SET_TARGET_PROPERTIES(floor-bench PROPERTIES
     CXX_STANDARD 11
@@ -4743,6 +4941,15 @@
   TARGET_INCLUDE_DIRECTORIES(f32-softmax-bench PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
   TARGET_LINK_LIBRARIES(f32-softmax-bench PRIVATE XNNPACK fp16 benchmark bench-utils)
 
+  ADD_EXECUTABLE(f32-velu-bench bench/f32-velu.cc)
+  SET_TARGET_PROPERTIES(f32-velu-bench PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(f32-velu-bench PRIVATE src)
+  TARGET_INCLUDE_DIRECTORIES(f32-velu-bench PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
+  TARGET_LINK_LIBRARIES(f32-velu-bench PRIVATE XNNPACK fp16 benchmark bench-utils)
+
   ADD_EXECUTABLE(f32-vsqrt-bench bench/f32-vsqrt.cc)
   SET_TARGET_PROPERTIES(f32-vsqrt-bench PROPERTIES
     CXX_STANDARD 11
diff --git a/METADATA b/METADATA
index 3a20921..804fead 100644
--- a/METADATA
+++ b/METADATA
@@ -9,11 +9,11 @@
     type: GIT
     value: "https://github.com/google/XNNPACK"
   }
-  version: "de390d4b6fd1ca0b82bba3640fbd3cc9bbc5c4ef"
+  version: "db2475b8af7e129b722167c3579ea743735fd733"
   license_type: NOTICE
   last_upgrade_date {
-    year: 2020
-    month: 11
-    day: 30
+    year: 2021
+    month: 1
+    day: 5
   }
 }
diff --git a/README.md b/README.md
index fb89265..e6f09c2 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 
 ## Supported Architectures
 
-- ARM64 on Android, Linux, and iOS (including WatchOS and tvOS)
+- ARM64 on Android, Linux, macOS, and iOS (including WatchOS and tvOS)
 - ARMv7 (with NEON) on Android, Linux, and iOS (including WatchOS)
 - x86 and x86-64 (up to AVX512) on Windows, Linux, macOS, Android, and iOS simulator
 - WebAssembly MVP
@@ -21,6 +21,7 @@
 - 2D ArgMax Pooling (Max Pooling + indices)
 - 2D Unpooling
 - 2D Bilinear Resize
+- 2D Depth-to-Space (AKA Pixel Shuffle)
 - Add (including broadcasting, two inputs only)
 - Subtract (including broadcasting)
 - Divide (including broadcasting)
@@ -36,6 +37,7 @@
 - Ceiling (rounding to integer above)
 - Clamp (includes ReLU and ReLU6)
 - Copy
+- ELU
 - Floor (rounding to integer below)
 - HardSwish
 - Leaky ReLU
@@ -104,6 +106,8 @@
 - [TensorFlow.js WebAssembly backend](https://blog.tensorflow.org/2020/03/introducing-webassembly-backend-for-tensorflow-js.html).
 - [PyTorch Mobile](https://pytorch.org/mobile).
 - [MediaPipe for the Web](https://developers.googleblog.com/2020/01/mediapipe-on-web.html).
+- [Alibaba HALO (Heterogeneity-Aware Lowering and Optimization)](https://github.com/alibaba/heterogeneity-aware-lowering-and-optimization)
+- [Samsung ONE (On-device Neural Engine)](https://github.com/Samsung/ONE)
 
 ## Acknowledgements
 
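For reference, the ELU operator newly listed above computes the identity for non-negative inputs and an exponentially saturating curve for negative inputs. A minimal scalar sketch of the definition that the f32-velu micro-kernels (exercised by the f32-velu-bench target above) are expected to approximate; this is illustrative only, not XNNPACK code:

#include <cmath>

// ELU reference: y = x for x >= 0, y = alpha * (exp(x) - 1) for x < 0.
// The vectorized f32-velu kernels typically approximate exp() internally
// rather than calling libm; this scalar form is just the mathematical definition.
static float elu_reference(float x, float alpha) {
  return x >= 0.0f ? x : alpha * std::expm1(x);
}
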
diff --git a/WORKSPACE b/WORKSPACE
index 66f2c79..a901453 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -45,9 +45,9 @@
 # pthreadpool library, used for parallelization
 http_archive(
     name = "pthreadpool",
-    strip_prefix = "pthreadpool-029c88620802e1361ccf41d1970bd5b07fd6b7bb",
-    sha256 = "03312bd7d8d9e379d685258963ee8820767158b5946cdd00336ff17dae851001",
-    urls = ["https://github.com/Maratyszcza/pthreadpool/archive/029c88620802e1361ccf41d1970bd5b07fd6b7bb.zip"],
+    strip_prefix = "pthreadpool-545ebe9f225aec6dca49109516fac02e973a3de2",
+    sha256 = "8461f6540ae9f777ce20d1c0d1d249e5e61c438744fb390c0c6f91940aa69ea3",
+    urls = ["https://github.com/Maratyszcza/pthreadpool/archive/545ebe9f225aec6dca49109516fac02e973a3de2.zip"],
 )
 
 # clog library, used for logging
@@ -65,24 +65,15 @@
 # cpuinfo library, used for detecting processor characteristics
 http_archive(
     name = "cpuinfo",
-    strip_prefix = "cpuinfo-6cecd15784fcb6c5c0aa7311c6248879ce2cb8b2",
-    sha256 = "b1f2ee97e46d8917a66bcb47452fc510d511829556c93b83e06841b9b35261a5",
+    strip_prefix = "cpuinfo-ed8b86a253800bafdb7b25c5c399f91bff9cb1f3",
+    sha256 = "a7f9a188148a1660149878f737f42783e72f33a4f842f3e362fee2c981613e53",
     urls = [
-        "https://github.com/pytorch/cpuinfo/archive/6cecd15784fcb6c5c0aa7311c6248879ce2cb8b2.zip",
+        "https://github.com/pytorch/cpuinfo/archive/ed8b86a253800bafdb7b25c5c399f91bff9cb1f3.zip",
     ],
     build_file = "@//third_party:cpuinfo.BUILD",
     patches = ["@//third_party:cpuinfo.patch"],
 )
 
-# psimd library, used for fallback 128-bit SIMD micro-kernels
-http_archive(
-    name = "psimd",
-    strip_prefix = "psimd-072586a71b55b7f8c584153d223e95687148a900",
-    sha256 = "dc615342bcbe51ca885323e51b68b90ed9bb9fa7df0f4419dbfa0297d5e837b7",
-    urls = ["https://github.com/Maratyszcza/psimd/archive/072586a71b55b7f8c584153d223e95687148a900.zip"],
-    build_file = "@//third_party:psimd.BUILD",
-)
-
 # Ruy library, used to benchmark against
 http_archive(
    name = "ruy",
diff --git a/bench/average-pooling.cc b/bench/average-pooling.cc
index a228900..d4720bd 100644
--- a/bench/average-pooling.cc
+++ b/bench/average-pooling.cc
@@ -95,7 +95,10 @@
   }
   pooling_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
@@ -169,7 +172,10 @@
   }
   pooling_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
@@ -304,7 +310,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
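The same counter change recurs throughout the benchmark sources below: the CPU frequency is reported under "cpufreq" and only when the query returns a non-zero value, instead of unconditionally recording a possibly-zero "Freq". A minimal sketch of the pattern, with a hypothetical helper name (the actual benchmarks inline this at every call site):

#include <cstdint>
#include <benchmark/benchmark.h>
#include "bench/utils.h"  // benchmark::utils::GetCurrentCpuFrequency()

// Hypothetical helper illustrating the repeated pattern: export the CPU
// frequency counter only when it is known; a zero return means the platform
// could not provide it, so no counter is recorded at all.
static void ReportCpuFrequency(benchmark::State& state) {
  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
  if (cpu_frequency != 0) {
    state.counters["cpufreq"] = cpu_frequency;
  }
}
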
diff --git a/bench/bankers-rounding.cc b/bench/bankers-rounding.cc
index 9925c27..09415cf 100644
--- a/bench/bankers-rounding.cc
+++ b/bench/bankers-rounding.cc
@@ -77,7 +77,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -184,7 +187,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
diff --git a/bench/ceiling.cc b/bench/ceiling.cc
index 6cff9b1..6c2e164 100644
--- a/bench/ceiling.cc
+++ b/bench/ceiling.cc
@@ -77,7 +77,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -184,7 +187,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
diff --git a/bench/channel-shuffle.cc b/bench/channel-shuffle.cc
index 97d5ab0..b45df59 100644
--- a/bench/channel-shuffle.cc
+++ b/bench/channel-shuffle.cc
@@ -73,7 +73,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * groups * group_channels;
   state.counters["elements"] =
@@ -138,7 +141,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * groups * group_channels;
   state.counters["elements"] =
diff --git a/bench/convolution.cc b/bench/convolution.cc
index b0629d7..4616f5f 100644
--- a/bench/convolution.cc
+++ b/bench/convolution.cc
@@ -143,7 +143,11 @@
     convolution_op = nullptr;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["OPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       batch_size * output_height * output_width *
@@ -259,7 +263,11 @@
     convolution_op = nullptr;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["OPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       batch_size * output_height * output_width *
@@ -375,7 +383,11 @@
     convolution_op = nullptr;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       batch_size * output_height * output_width *
@@ -486,7 +498,11 @@
     convolution_op = nullptr;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       batch_size * output_height * output_width *
@@ -693,7 +709,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       batch_size * output_height * output_width *
@@ -1016,7 +1036,11 @@
   bias_tensor.allocator()->free();
   output_tensor.allocator()->free();
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       batch_size * output_height * output_width *
diff --git a/bench/deconvolution.cc b/bench/deconvolution.cc
index 7ac9623..3d01df6 100644
--- a/bench/deconvolution.cc
+++ b/bench/deconvolution.cc
@@ -130,13 +130,17 @@
     deconvolution_op = nullptr;
   }
 
-    state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
-    state.counters["OPS"] = benchmark::Counter(
-    uint64_t(state.iterations()) * 2 *
-      batch_size * input_width * input_width *
-      groups * group_input_channels * group_output_channels *
-      kernel_height * kernel_width,
-    benchmark::Counter::kIsRate);
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  state.counters["OPS"] = benchmark::Counter(
+  uint64_t(state.iterations()) * 2 *
+    batch_size * input_width * input_width *
+    groups * group_input_channels * group_output_channels *
+    kernel_height * kernel_width,
+  benchmark::Counter::kIsRate);
 }
 #endif  // XNN_NO_QU8_OPERATORS
 
@@ -243,7 +247,11 @@
     deconvolution_op = nullptr;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       batch_size * input_width * input_width *
@@ -427,7 +435,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       batch_size * input_width * input_width *
diff --git a/bench/elu.cc b/bench/elu.cc
new file mode 100644
index 0000000..5cd1eb9
--- /dev/null
+++ b/bench/elu.cc
@@ -0,0 +1,226 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <functional>
+#include <limits>
+#include <random>
+#include <vector>
+
+#include <xnnpack.h>
+
+#include <benchmark/benchmark.h>
+#include "bench/utils.h"
+#ifdef BENCHMARK_TENSORFLOW_LITE
+#include "flatbuffers/include/flatbuffers/flatbuffers.h"
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/version.h"
+#endif  // BENCHMARK_TENSORFLOW_LITE
+
+
+static void xnnpack_elu_f32(benchmark::State& state) {
+  const size_t batch_size = state.range(0);
+  const size_t channels = state.range(1);
+
+  std::random_device random_device;
+  auto rng = std::mt19937(random_device());
+  auto f32rng = std::bind(std::uniform_real_distribution<float>(-20.0f, 20.0f), std::ref(rng));
+
+  std::vector<float> input(batch_size * channels);
+  std::vector<float> output(batch_size * channels);
+  std::generate(input.begin(), input.end(), std::ref(f32rng));
+  std::fill(output.begin(), output.end(), std::nanf(""));
+
+  xnn_status status = xnn_initialize(nullptr /* allocator */);
+  if (status != xnn_status_success) {
+    state.SkipWithError("failed to initialize XNNPACK");
+    return;
+  }
+
+  xnn_operator_t elu_op = nullptr;
+  status = xnn_create_elu_nc_f32(
+    channels, channels /* input stride */, channels /* output stride */,
+    1.0f /* alpha */, 0 /* flags */, &elu_op);
+  if (status != xnn_status_success || elu_op == nullptr) {
+    state.SkipWithError("failed to create ELU operator");
+    return;
+  }
+
+  status = xnn_setup_elu_nc_f32(
+    elu_op,
+    batch_size,
+    input.data(), output.data(),
+    nullptr /* thread pool */);
+  if (status != xnn_status_success) {
+    state.SkipWithError("failed to setup ELU operator");
+    return;
+  }
+
+  for (auto _ : state) {
+    status = xnn_run_operator(elu_op, nullptr /* thread pool */);
+    if (status != xnn_status_success) {
+      state.SkipWithError("failed to run ELU operator");
+      return;
+    }
+  }
+
+  status = xnn_delete_operator(elu_op);
+  if (status != xnn_status_success) {
+    state.SkipWithError("failed to delete ELU operator");
+    return;
+  }
+
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = batch_size * channels;
+  state.counters["elements"] =
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements_per_iteration * sizeof(float);
+  state.counters["bytes"] =
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
+}
+
+#ifdef BENCHMARK_TENSORFLOW_LITE
+static void tflite_elu_f32(benchmark::State& state) {
+  const size_t batch_size = state.range(0);
+  const size_t channels = state.range(1);
+
+  std::random_device random_device;
+  auto rng = std::mt19937(random_device());
+  auto f32rng = std::bind(std::uniform_real_distribution<float>(-20.0f, 20.0f), std::ref(rng));
+
+  flatbuffers::FlatBufferBuilder builder;
+  const flatbuffers::Offset<tflite::OperatorCode> operator_code =
+      CreateOperatorCode(builder, tflite::BuiltinOperator_ELU);
+
+  const std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers{{
+    tflite::CreateBuffer(builder, builder.CreateVector({})),
+  }};
+
+  const std::array<int32_t, 4> input_shape{{
+    static_cast<int32_t>(batch_size),
+    static_cast<int32_t>(1 /* height */),
+    static_cast<int32_t>(1 /* width */),
+    static_cast<int32_t>(channels)
+  }};
+  const std::array<int32_t, 4> output_shape{{
+    static_cast<int32_t>(batch_size),
+    static_cast<int32_t>(1 /* height */),
+    static_cast<int32_t>(1 /* width */),
+    static_cast<int32_t>(channels)
+  }};
+
+  const std::array<flatbuffers::Offset<tflite::Tensor>, 2> tensors{{
+    tflite::CreateTensor(builder,
+                         builder.CreateVector<int32_t>(input_shape.data(), input_shape.size()),
+                         tflite::TensorType_FLOAT32),
+    tflite::CreateTensor(builder,
+                         builder.CreateVector<int32_t>(output_shape.data(), output_shape.size()),
+                         tflite::TensorType_FLOAT32),
+  }};
+
+  const std::array<int32_t, 1> op_inputs{{ 0 }};
+  const std::array<int32_t, 1> op_outputs{{ 1 }};
+  flatbuffers::Offset<tflite::Operator> op = tflite::CreateOperator(
+      builder,
+      0 /* opcode_index */,
+      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
+      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()));
+
+  const std::array<int32_t, 1> graph_inputs{{ 0 }};
+  const std::array<int32_t, 1> graph_outputs{{ 1 }};
+  const flatbuffers::Offset<tflite::SubGraph> subgraph = tflite::CreateSubGraph(
+      builder,
+      builder.CreateVector(tensors.data(), tensors.size()),
+      builder.CreateVector<int32_t>(graph_inputs.data(), graph_inputs.size()),
+      builder.CreateVector<int32_t>(graph_outputs.data(), graph_outputs.size()),
+      builder.CreateVector(&op, 1));
+
+  const flatbuffers::Offset<tflite::Model> model_buffer = tflite::CreateModel(builder,
+      TFLITE_SCHEMA_VERSION,
+      builder.CreateVector(&operator_code, 1),
+      builder.CreateVector(&subgraph, 1),
+      builder.CreateString("ELU model"),
+      builder.CreateVector(buffers.data(), buffers.size()));
+
+  builder.Finish(model_buffer);
+
+  const tflite::Model* model = tflite::GetModel(builder.GetBufferPointer());
+  tflite::ops::builtin::BuiltinOpResolver resolver;
+  tflite::InterpreterBuilder interpreterBuilder(model, resolver);
+  std::unique_ptr<tflite::Interpreter> interpreter;
+  if (interpreterBuilder(&interpreter) != kTfLiteOk) {
+    state.SkipWithError("failed to create TFLite interpreter");
+    return;
+  }
+  if (interpreter == nullptr) {
+    state.SkipWithError("TFLite interpreter is null");
+    return;
+  }
+  interpreter->SetNumThreads(1);
+
+  if (interpreter->AllocateTensors() != kTfLiteOk) {
+    state.SkipWithError("failed to allocate tensors");
+    return;
+  }
+
+  std::generate(
+    interpreter->typed_tensor<float>(0),
+    interpreter->typed_tensor<float>(0) + batch_size * channels,
+    std::ref(f32rng));
+
+  for (auto _ : state) {
+    if (interpreter->Invoke() != kTfLiteOk) {
+      state.SkipWithError("failed to invoke TFLite interpreter");
+      return;
+    }
+  }
+
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = batch_size * channels;
+  state.counters["elements"] =
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements_per_iteration * sizeof(float);
+  state.counters["bytes"] =
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
+
+  interpreter.reset();
+}
+#endif  // BENCHMARK_TENSORFLOW_LITE
+
+static void CharacteristicArguments(benchmark::internal::Benchmark* b)
+{
+  b->ArgNames({"N", "C"});
+
+  int32_t c = 16;
+  for (int32_t n = 224; n >= 7; n /= 2) {
+    b->Args({n * n, c});
+    c *= 2;
+  }
+}
+
+BENCHMARK(xnnpack_elu_f32)->Apply(CharacteristicArguments)->UseRealTime();
+
+#ifdef BENCHMARK_TENSORFLOW_LITE
+  BENCHMARK(tflite_elu_f32)->Apply(CharacteristicArguments)->UseRealTime();
+#endif  // BENCHMARK_TENSORFLOW_LITE
+
+#ifndef XNNPACK_BENCHMARK_NO_MAIN
+BENCHMARK_MAIN();
+#endif
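Stripped of the benchmark scaffolding, the new ELU operator follows XNNPACK's usual create/setup/run/delete flow. A minimal sketch using only the calls that appear in bench/elu.cc above; status checks are elided and the function name is illustrative:

#include <cstddef>
#include <xnnpack.h>

// Minimal sketch of running the f32 ELU operator on a [batch_size, channels]
// buffer, mirroring the calls in bench/elu.cc; xnn_status checks omitted.
void run_elu_once(const float* input, float* output, size_t batch_size, size_t channels) {
  xnn_initialize(nullptr /* allocator */);

  xnn_operator_t elu_op = nullptr;
  xnn_create_elu_nc_f32(
    channels, channels /* input stride */, channels /* output stride */,
    1.0f /* alpha */, 0 /* flags */, &elu_op);

  xnn_setup_elu_nc_f32(elu_op, batch_size, input, output, nullptr /* thread pool */);
  xnn_run_operator(elu_op, nullptr /* thread pool */);
  xnn_delete_operator(elu_op);
}

The CharacteristicArguments sweep pairs shrinking spatial extents with doubling channel counts (224²×16, 112²×32, 56²×64, down to 7²×512), roughly tracking the feature-map shapes of a typical image classifier.
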
diff --git a/bench/end2end.cc b/bench/end2end.cc
index 5052bda..52c5436 100644
--- a/bench/end2end.cc
+++ b/bench/end2end.cc
@@ -45,7 +45,11 @@
       }
     }
   }
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 }
 
 static void FP32MobileNetV1(benchmark::State& state) {
diff --git a/bench/f16-dwconv.cc b/bench/f16-dwconv.cc
index 836da0b..e8649d5 100644
--- a/bench/f16-dwconv.cc
+++ b/bench/f16-dwconv.cc
@@ -143,12 +143,15 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
-  state.counters["FLOPS"] = benchmark::Counter(
-    uint64_t(state.iterations()) * 2 * output_size * channels * kernel_size,
-    benchmark::Counter::kIsRate);
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
-  state.counters["BYTES"] = benchmark::Counter(
+  state.counters["FLOPS"] = benchmark::Counter(
+    uint64_t(state.iterations()) * 2 * output_size * channels * kernel_size, benchmark::Counter::kIsRate);
+
+  state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) * (output_size + input_height * input_width + kernel_size + 1 /* bias */) * channels * sizeof(uint16_t),
     benchmark::Counter::kIsRate);
 }
@@ -156,62 +159,62 @@
 #if XNN_ARCH_ARM64
   static void f16_dwconv_8x25__neonfp16arith_acc2(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith_acc2, 8, 25,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_8x25__neonfp16arith(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith, 8, 25,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_8x4__neonfp16arith_acc2(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up8x4__neonfp16arith_acc2, 8, 4,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_8x4__neonfp16arith(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up8x4__neonfp16arith, 8, 4,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_8x9__neonfp16arith_acc2(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith_acc2, 8, 9,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_8x9__neonfp16arith(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith, 8, 9,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_16x25__neonfp16arith_acc2(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up16x25__neonfp16arith_acc2, 16, 25,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_16x25__neonfp16arith(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up16x25__neonfp16arith, 16, 25,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_16x4__neonfp16arith_acc2(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up16x4__neonfp16arith_acc2, 16, 4,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_16x4__neonfp16arith(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up16x4__neonfp16arith, 16, 4,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_16x9__neonfp16arith_acc2(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2, 16, 9,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   static void f16_dwconv_16x9__neonfp16arith(benchmark::State& state, const char* net) {
     DWConvBenchmark(state, xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith, 16, 9,
-    benchmark::utils::CheckNEONFP16ARITH);
+      benchmark::utils::CheckNEONFP16ARITH);
   }
 
   BENCHMARK_DWCONV(f16_dwconv_8x25__neonfp16arith_acc2)
diff --git a/bench/f16-gemm.cc b/bench/f16-gemm.cc
index 738883d..07f0f1d 100644
--- a/bench/f16-gemm.cc
+++ b/bench/f16-gemm.cc
@@ -100,7 +100,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
diff --git a/bench/f16-igemm.cc b/bench/f16-igemm.cc
index e0ea555..51a42ab 100644
--- a/bench/f16-igemm.cc
+++ b/bench/f16-igemm.cc
@@ -150,7 +150,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       output_height * output_width *
diff --git a/bench/f16-relu.cc b/bench/f16-relu.cc
index 404d57c..8a375af 100644
--- a/bench/f16-relu.cc
+++ b/bench/f16-relu.cc
@@ -9,8 +9,6 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
 #include <fp16/fp16.h>
 #include "bench/utils.h"
@@ -25,37 +23,38 @@
   xnn_f16_relu_ukernel_function f16_relu,
   benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-    return;
-  }
   if (isa_check && !isa_check(state)) {
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-10.0f, 10.0f), std::ref(rng));
   auto f16rng = std::bind(fp16_ieee_from_fp32_value, f32rng);
 
-  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> x(n);
+  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> x(elements);
   std::generate(x.begin(), x.end(), std::ref(f16rng));
-  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> y(n);
+  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> y(elements);
   std::generate(x.begin(), x.end(), std::ref(f16rng));
 
   for (auto _ : state) {
-    f16_relu(n * sizeof(uint16_t), x.data(), y.data(), NULL);
+    f16_relu(elements * sizeof(uint16_t), x.data(), y.data(), NULL);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
 
+  const size_t bytes_per_iteration = 2 * elements * sizeof(uint16_t);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n * sizeof(uint16_t), benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 #if XNN_ARCH_ARM64
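In the f16-relu benchmark above, bytes-per-iteration counts one read and one write per element, i.e. 2 × elements × sizeof(uint16_t); for an illustrative 1,048,576 elements that is 2 × 1,048,576 × 2 bytes = 4 MiB of memory traffic per iteration.
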
diff --git a/bench/f16-spmm.cc b/bench/f16-spmm.cc
index a19f736..8c9464c 100644
--- a/bench/f16-spmm.cc
+++ b/bench/f16-spmm.cc
@@ -14,7 +14,7 @@
 
 #include <benchmark/benchmark.h>
 #include <fp16/fp16.h>
-#include "bench/gemm.h"
+#include "bench/spmm.h"
 #include "bench/utils.h"
 #include <xnnpack/AlignedAllocator.h>
 #include <xnnpack/common.h>
@@ -155,7 +155,11 @@
       &params);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * num_nonzeroes, benchmark::Counter::kIsRate);
 
@@ -190,14 +194,14 @@
     SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_x2, 32, 1, 0.8f);
   }
 
-  BENCHMARK_GEMM(spmm80_8x1__neonfp16arith)
-  BENCHMARK_GEMM(spmm80_8x1__neonfp16arith_x2)
-  BENCHMARK_GEMM(spmm80_16x1__neonfp16arith)
-  BENCHMARK_GEMM(spmm80_16x1__neonfp16arith_x2)
-  BENCHMARK_GEMM(spmm80_24x1__neonfp16arith)
-  BENCHMARK_GEMM(spmm80_24x1__neonfp16arith_x2)
-  BENCHMARK_GEMM(spmm80_32x1__neonfp16arith)
-  BENCHMARK_GEMM(spmm80_32x1__neonfp16arith_x2)
+  BENCHMARK_SPMM(spmm80_8x1__neonfp16arith)
+  BENCHMARK_SPMM(spmm80_8x1__neonfp16arith_x2)
+  BENCHMARK_SPMM(spmm80_16x1__neonfp16arith)
+  BENCHMARK_SPMM(spmm80_16x1__neonfp16arith_x2)
+  BENCHMARK_SPMM(spmm80_24x1__neonfp16arith)
+  BENCHMARK_SPMM(spmm80_24x1__neonfp16arith_x2)
+  BENCHMARK_SPMM(spmm80_32x1__neonfp16arith)
+  BENCHMARK_SPMM(spmm80_32x1__neonfp16arith_x2)
 #endif  // XNN_ARCH_ARM64
 
 #ifndef XNNPACK_BENCHMARK_NO_MAIN
diff --git a/bench/f32-conv-hwc.cc b/bench/f32-conv-hwc.cc
index df2f4e8..3309e26 100644
--- a/bench/f32-conv-hwc.cc
+++ b/bench/f32-conv-hwc.cc
@@ -10,8 +10,6 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
 #include "bench/dconv.h"
 #include "bench/utils.h"
@@ -25,10 +23,11 @@
 
 static void DConv3X3S2P1Benchmark(benchmark::State& state,
   xnn_f32_conv_hwc_ukernel_function conv,
-  uint32_t output_channels_tile)
+  uint32_t output_channels_tile,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
+  if (isa_check && !isa_check(state)) {
+    return;
   }
 
   const size_t input_height = state.range(0);
@@ -100,7 +99,11 @@
       &params);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       output_height * output_width *
@@ -111,16 +114,16 @@
 
 #if XNN_ARCH_ARM64
   static void f32_conv_hwc_3x3s2p1c3x8__neonfma_2x1(benchmark::State& state, const char* net) {
-    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x1, 8);
+    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x1, 8, benchmark::utils::CheckNEONFMA);
   }
   static void f32_conv_hwc_3x3s2p1c3x4__neonfma_2x1(benchmark::State& state, const char* net) {
-    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x1, 4);
+    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x1, 4, benchmark::utils::CheckNEONFMA);
   }
   static void f32_conv_hwc_3x3s2p1c3x8__neonfma_2x2(benchmark::State& state, const char* net) {
-    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x2, 8);
+    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x2, 8, benchmark::utils::CheckNEONFMA);
   }
   static void f32_conv_hwc_3x3s2p1c3x4__neonfma_2x2(benchmark::State& state, const char* net) {
-    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x2, 4);
+    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x2, 4, benchmark::utils::CheckNEONFMA);
   }
 
   BENCHMARK_DCONV(f32_conv_hwc_3x3s2p1c3x8__neonfma_2x1);
@@ -131,16 +134,16 @@
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
   static void f32_conv_hwc_3x3s2p1c3x8__neon_2x1(benchmark::State& state, const char* net) {
-    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neon_2x1, 8);
+    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neon_2x1, 8, benchmark::utils::CheckNEON);
   }
   static void f32_conv_hwc_3x3s2p1c3x4__neon_2x1(benchmark::State& state, const char* net) {
-    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neon_2x1, 4);
+    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neon_2x1, 4, benchmark::utils::CheckNEON);
   }
   static void f32_conv_hwc_3x3s2p1c3x8__neon_2x2(benchmark::State& state, const char* net) {
-    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neon_2x2, 8);
+    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neon_2x2, 8, benchmark::utils::CheckNEON);
   }
   static void f32_conv_hwc_3x3s2p1c3x4__neon_2x2(benchmark::State& state, const char* net) {
-    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neon_2x2, 4);
+    DConv3X3S2P1Benchmark(state, xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neon_2x2, 4, benchmark::utils::CheckNEON);
   }
 
   BENCHMARK_DCONV(f32_conv_hwc_3x3s2p1c3x8__neon_2x1);
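Across these micro-kernel benchmarks, the cpuinfo_initialize() guard is replaced by an optional per-kernel ISA check. A minimal sketch of the pattern with a hypothetical benchmark body; benchmark::utils::CheckNEON and CheckNEONFMA are the checkers passed in above:

#include <benchmark/benchmark.h>
#include "bench/utils.h"  // benchmark::utils::IsaCheckFunction, CheckNEON, CheckNEONFMA

// Hypothetical benchmark body showing the new guard: ISA-specific kernels pass
// a checker; if the required extension is missing, the checker flags the run as
// skipped and the body returns before touching the kernel.
static void SomeKernelBenchmark(benchmark::State& state,
                                benchmark::utils::IsaCheckFunction isa_check = nullptr) {
  if (isa_check && !isa_check(state)) {
    return;  // required ISA extension not available on this CPU
  }
  for (auto _ : state) {
    // ... invoke the micro-kernel under test ...
  }
}
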
diff --git a/bench/f32-conv-hwc2chw.cc b/bench/f32-conv-hwc2chw.cc
index 301dc91..1b1047b 100644
--- a/bench/f32-conv-hwc2chw.cc
+++ b/bench/f32-conv-hwc2chw.cc
@@ -25,10 +25,11 @@
 
 static void DConvHWC2CHW3X3S2P1Benchmark(benchmark::State& state,
   xnn_f32_conv_hwc2chw_ukernel_function conv,
-  uint32_t output_channels_tile)
+  uint32_t output_channels_tile,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
+  if (isa_check && !isa_check(state)) {
+    return;
   }
 
   const size_t input_height = state.range(0);
@@ -100,7 +101,11 @@
       &params);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       output_height * output_width *
@@ -111,7 +116,7 @@
 
 #if XNN_ARCH_ARM64
   static void f32_conv_hwc2chw_3x3s2p1c3x4__neonfma_2x2(benchmark::State& state, const char* net) {
-    DConvHWC2CHW3X3S2P1Benchmark(state, xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neonfma_2x2, 4);
+    DConvHWC2CHW3X3S2P1Benchmark(state, xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neonfma_2x2, 4, benchmark::utils::CheckNEONFMA);
   }
 
   BENCHMARK_DCONV(f32_conv_hwc2chw_3x3s2p1c3x4__neonfma_2x2);
diff --git a/bench/f32-dwconv-e2e.cc b/bench/f32-dwconv-e2e.cc
index 367f908..007fde2 100644
--- a/bench/f32-dwconv-e2e.cc
+++ b/bench/f32-dwconv-e2e.cc
@@ -63,7 +63,11 @@
       }
     }
   }
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 }
 
 #if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
@@ -257,9 +261,9 @@
       4 /* cr */, 9 /* mr */);
   }
 
-  static void f32_dwconv_up4x9__wasmsimd_acc2_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_dwconv_up4x9__wasmsimd_arm_acc2(benchmark::State& state, models::ExecutionPlanFactory model) {
     DWConvEnd2EndBenchmark(state, model,
-      xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm,
+      xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2,
       4 /* cr */, 9 /* mr */);
   }
 
@@ -269,9 +273,9 @@
       8 /* cr */, 9 /* mr */);
   }
 
-  static void f32_dwconv_up8x9__wasmsimd_acc2_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_dwconv_up8x9__wasmsimd_arm_acc2(benchmark::State& state, models::ExecutionPlanFactory model) {
     DWConvEnd2EndBenchmark(state, model,
-      xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm,
+      xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2,
       8 /* cr */, 9 /* mr */);
   }
 
@@ -281,9 +285,9 @@
       4 /* cr */, 9 /* mr */);
   }
 
-  static void f32_dwconv_up4x9__wasmsimd_acc2_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_dwconv_up4x9__wasmsimd_x86_acc2(benchmark::State& state, models::ExecutionPlanFactory model) {
     DWConvEnd2EndBenchmark(state, model,
-      xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86,
+      xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2,
       4 /* cr */, 9 /* mr */);
   }
 
@@ -293,21 +297,21 @@
       8 /* cr */, 9 /* mr */);
   }
 
-  static void f32_dwconv_up8x9__wasmsimd_acc2_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_dwconv_up8x9__wasmsimd_x86_acc2(benchmark::State& state, models::ExecutionPlanFactory model) {
     DWConvEnd2EndBenchmark(state, model,
-      xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86,
+      xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2,
       8 /* cr */, 9 /* mr */);
   }
 
   BENCHMARK_FP32_END2END(f32_dwconv_up4x9__wasmsimd_arm);
-  BENCHMARK_FP32_END2END(f32_dwconv_up4x9__wasmsimd_acc2_arm);
+  BENCHMARK_FP32_END2END(f32_dwconv_up4x9__wasmsimd_arm_acc2);
   BENCHMARK_FP32_END2END(f32_dwconv_up8x9__wasmsimd_arm);
-  BENCHMARK_FP32_END2END(f32_dwconv_up8x9__wasmsimd_acc2_arm);
+  BENCHMARK_FP32_END2END(f32_dwconv_up8x9__wasmsimd_arm_acc2);
 
   BENCHMARK_FP32_END2END(f32_dwconv_up4x9__wasmsimd_x86);
-  BENCHMARK_FP32_END2END(f32_dwconv_up4x9__wasmsimd_acc2_x86);
+  BENCHMARK_FP32_END2END(f32_dwconv_up4x9__wasmsimd_x86_acc2);
   BENCHMARK_FP32_END2END(f32_dwconv_up8x9__wasmsimd_x86);
-  BENCHMARK_FP32_END2END(f32_dwconv_up8x9__wasmsimd_acc2_x86);
+  BENCHMARK_FP32_END2END(f32_dwconv_up8x9__wasmsimd_x86_acc2);
 #endif  // XNN_ARCH_WASMSIMD
 
 static void f32_dwconv_up1x9__scalar(benchmark::State& state, models::ExecutionPlanFactory model) {
diff --git a/bench/f32-dwconv.cc b/bench/f32-dwconv.cc
index 648cf34..20d94cc 100644
--- a/bench/f32-dwconv.cc
+++ b/bench/f32-dwconv.cc
@@ -10,8 +10,6 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
 #include "bench/dwconv.h"
 #include "bench/utils.h"
@@ -30,10 +28,6 @@
   uint32_t cr, uint32_t kr,
   benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-    return;
-  }
   if (isa_check && !isa_check(state)) {
     return;
   }
@@ -141,12 +135,16 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * output_size * channels * kernel_size,
     benchmark::Counter::kIsRate);
 
-  state.counters["BYTES"] = benchmark::Counter(
+  state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) * (output_size + input_height * input_width + kernel_size + 1 /* bias */) * channels * sizeof(float),
     benchmark::Counter::kIsRate);
 }
diff --git a/bench/f32-dwconv2d-chw.cc b/bench/f32-dwconv2d-chw.cc
index a7aff8a..81c5c7f 100644
--- a/bench/f32-dwconv2d-chw.cc
+++ b/bench/f32-dwconv2d-chw.cc
@@ -10,8 +10,6 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
 #include "bench/dwconv.h"
 #include "bench/utils.h"
@@ -30,10 +28,6 @@
   uint32_t kh, uint32_t kw, uint32_t pw, uint32_t s,
   benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-    return;
-  }
   if (isa_check && !isa_check(state)) {
     return;
   }
@@ -137,46 +131,146 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * output_size * channels * kernel_size,
     benchmark::Counter::kIsRate);
 
-  state.counters["BYTES"] = benchmark::Counter(
+  state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) * (output_size + inputSize + kernel_size + 1 /* bias */) * channels * sizeof(float),
     benchmark::Counter::kIsRate);
 }
 
 #if XNN_ARCH_ARM
   static void dwconv2d_chw_3x3p1__neon_1x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_2x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_3x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_4x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_4x4, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_4x4, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_5x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_5x4, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_5x4, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_6x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_6x4, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_6x4, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_1x4_acc2(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc2, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc2, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_1x4_acc3(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc3, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc3, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_1x4_acc4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc4, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc4, 3, 3, 1, 1, benchmark::utils::CheckNEON);
   }
   static void dwconv2d_chw_3x3p1__neon_2x4_acc2(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2, 3, 3, 1, 1);
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2, 3, 3, 1, 1, benchmark::utils::CheckNEON);
+  }
+
+  static void dwconv2d_chw_3x3s2p1__neon_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4, 3, 3, 1, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_3x3s2p1__neon_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_2x4, 3, 3, 1, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_3x3s2p1__neon_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_3x4, 3, 3, 1, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_3x3s2p1__neon_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_4x4, 3, 3, 1, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_3x3s2p1__neon_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc2, 3, 3, 1, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_3x3s2p1__neon_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc3, 3, 3, 1, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_3x3s2p1__neon_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc4, 3, 3, 1, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_3x3s2p1__neon_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_2x4_acc2, 3, 3, 1, 2, benchmark::utils::CheckNEON);
+  }
+
+  static void dwconv2d_chw_5x5p2__neon_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_2x4, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_3x4, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_4x4, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4_acc2, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4_acc3, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4_acc4, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4_acc5, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_2x4_acc2, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_2x4_acc3, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_3x4_acc2, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5p2__neon_4x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_4x4_acc2, 5, 5, 2, 1, benchmark::utils::CheckNEON);
+  }
+
+  static void dwconv2d_chw_5x5s2p2__neon_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_2x4, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_3x4, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc2, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc3, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc4, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc5, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_2x4_acc2, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_2x4_acc3, 5, 5, 2, 2, benchmark::utils::CheckNEON);
+  }
+  static void dwconv2d_chw_5x5s2p2__neon_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_3x4_acc2, 5, 5, 2, 2, benchmark::utils::CheckNEON);
   }
 
   BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__neon_1x4)
@@ -189,6 +283,40 @@
   BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__neon_1x4_acc3)
   BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__neon_1x4_acc4)
   BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__neon_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__neon_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__neon_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__neon_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__neon_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__neon_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__neon_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__neon_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__neon_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_3x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__neon_4x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__neon_3x4_acc2)
 #endif  // XNN_ARCH_ARM
 
 #if XNN_ARCH_ARM64
@@ -582,114 +710,691 @@
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 #if XNN_ARCH_WASMSIMD
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_1x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_2x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_3x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_4x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_5x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_6x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_6x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_1x4_acc2(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_1x4_acc3(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_1x4_acc4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_arm_2x4_acc2(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_1x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_2x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_3x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_4x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_5x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_6x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_6x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_1x4_acc2(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_1x4_acc3(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_1x4_acc4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3p1__wasmsimd_x86_2x4_acc2(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2, 3, 3, 1, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2, 3, 3, 1, 1);
   }
 
-  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_1x4_acc3(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3, 3, 3, 1, 2);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_5x5p2__wasmsimd_arm_3x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4, 5, 5, 2, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_1x4_acc2(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2, 5, 5, 2, 2);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_1x4_acc3(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3, 3, 3, 1, 2);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_5x5p2__wasmsimd_x86_3x4(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4, 5, 5, 2, 1);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4, 3, 3, 1, 1);
   }
-  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_1x4_acc2(benchmark::State& state, const char* net) {
-    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2, 5, 5, 2, 2);
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_6x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_arm_splat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_6x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4, 3, 3, 1, 1);
+  }
+  static void dwconv2d_chw_3x3p1__wasmsimd_x86_splat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2, 3, 3, 1, 1);
   }
 
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_1x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_2x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_3x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_4x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_5x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_6x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_1x4_acc2)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_1x4_acc3)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_1x4_acc4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_2x4_acc2)
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2, 3, 3, 1, 2);
+  }
 
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_1x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_2x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_3x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_4x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_5x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_6x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_1x4_acc2)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_1x4_acc3)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_1x4_acc4)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_2x4_acc2)
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2, 3, 3, 1, 2);
+  }
 
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_1x4_acc3)
-  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_3x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_1x4_acc2)
-  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_1x4_acc3)
-  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_3x4)
-  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_1x4_acc2)
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2, 3, 3, 1, 2);
+  }
+
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4, 3, 3, 1, 2);
+  }
+  static void dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2, 3, 3, 1, 2);
+  }
+
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2, 5, 5, 2, 1);
+  }
+
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2, 5, 5, 2, 1);
+  }
+
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_arm_splat_4x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2, 5, 5, 2, 1);
+  }
+
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_4x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_5x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2, 5, 5, 2, 1);
+  }
+  static void dwconv2d_chw_5x5p2__wasmsimd_x86_splat_4x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2, 5, 5, 2, 1);
+  }
+
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2, 5, 5, 2, 2);
+  }
+
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2, 5, 5, 2, 2);
+  }
+
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2, 5, 5, 2, 2);
+  }
+
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_2x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_3x4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4_acc4(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4_acc5(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_2x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_2x4_acc3(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3, 5, 5, 2, 2);
+  }
+  static void dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_3x4_acc2(benchmark::State& state, const char* net) {
+    DWConv2DBenchmark(state, xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2, 5, 5, 2, 2);
+  }
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_6x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_6x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_6x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_arm_splat_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_6x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3p1__wasmsimd_x86_splat_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_arm_splat_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_3x3s2p1__wasmsimd_x86_splat_2x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_3x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_arm_splat_4x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_4x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_5x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_3x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5p2__wasmsimd_x86_splat_4x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_arm_splat_3x4_acc2)
+
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_2x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_3x4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4_acc4)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_1x4_acc5)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_2x4_acc2)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_2x4_acc3)
+  BENCHMARK_DWCONV(dwconv2d_chw_5x5s2p2__wasmsimd_x86_splat_3x4_acc2)
 #endif  // XNN_ARCH_WASMSIMD
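
  The WASMSIMD hunk above is mechanical: one thin static wrapper per micro-kernel variant, each forwarding to the shared DWConv2DBenchmark harness with the kernel geometry (height, width, padding, stride, judging from the 3x3p1 and 5x5s2p2 call sites), plus one BENCHMARK_DWCONV(...) registration per wrapper. As a hedged, self-contained sketch of the same Google Benchmark registration pattern, using a stand-in kernel rather than the real XNNPACK micro-kernels and a plain BENCHMARK_CAPTURE in place of the BENCHMARK_DWCONV macro (whose expansion is not shown in this diff):

  ```cpp
  // Minimal sketch of the wrapper + BENCHMARK_CAPTURE registration pattern used
  // throughout this file. "fake_dwconv_kernel" is a stand-in for an XNNPACK
  // micro-kernel; the real harness also reports size/FLOP counters.
  #include <cstddef>
  #include <vector>
  #include <benchmark/benchmark.h>

  static void fake_dwconv_kernel(const float* input, float* output, size_t n) {
    for (size_t i = 0; i < n; i++) {
      output[i] = input[i] * 0.5f;  // placeholder arithmetic
    }
  }

  // One wrapper per kernel variant; the extra const char* parameter carries the
  // captured "network" name, mirroring the (state, net) signature in the diff.
  static void dwconv2d_chw_fake_1x4(benchmark::State& state, const char* net) {
    const size_t size = static_cast<size_t>(state.range(0));
    std::vector<float> input(size, 1.0f), output(size);
    for (auto _ : state) {
      fake_dwconv_kernel(input.data(), output.data(), size);
    }
    state.SetLabel(net);
  }

  // BENCHMARK_DWCONV presumably expands into registrations of this shape,
  // pairing each wrapper with representative layer sizes per network.
  BENCHMARK_CAPTURE(dwconv2d_chw_fake_1x4, mobilenet_v1, "MobileNet v1")
      ->Arg(96 * 96 * 32);

  BENCHMARK_MAIN();
  ```
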
 
 static void dwconv2d_chw_3x3p1__scalar_1x1(benchmark::State& state, const char* net) {
diff --git a/bench/f32-gemm-e2e.cc b/bench/f32-gemm-e2e.cc
index 523ac34..281b4d1 100644
--- a/bench/f32-gemm-e2e.cc
+++ b/bench/f32-gemm-e2e.cc
@@ -65,7 +65,11 @@
       }
     }
   }
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 }
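
  The counter change in this hunk replaces the unconditional "Freq" counter with a guarded "cpufreq" one, so on platforms where the frequency cannot be read (GetCurrentCpuFrequency returning 0) the counter is simply omitted instead of reported as a bogus zero. A hedged usage sketch of the same guarded-counter pattern, with a hypothetical stand-in for the frequency reader:

  ```cpp
  // Sketch of the guarded-counter pattern introduced above; ReadCpuFrequency is
  // a hypothetical stand-in for benchmark::utils::GetCurrentCpuFrequency.
  #include <cstdint>
  #include <benchmark/benchmark.h>

  static uint64_t ReadCpuFrequency() {
    return 0;  // pretend this platform cannot report its CPU frequency
  }

  static void bm_with_optional_cpufreq(benchmark::State& state) {
    for (auto _ : state) {
      benchmark::DoNotOptimize(42);
    }
    // Only attach the counter when the value is meaningful; a zero entry would
    // otherwise show up as "cpufreq=0" in every report.
    const uint64_t cpu_frequency = ReadCpuFrequency();
    if (cpu_frequency != 0) {
      state.counters["cpufreq"] = cpu_frequency;
    }
  }
  BENCHMARK(bm_with_optional_cpufreq);
  ```
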
 
 #if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
@@ -544,6 +548,15 @@
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  static void f32_gemm_3x8__sse_load1(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_3x8__sse_load1,
+      xnn_f32_igemm_minmax_ukernel_3x8__sse_load1,
+      xnn_f32_gemm_minmax_ukernel_1x8__sse_load1,
+      xnn_f32_igemm_minmax_ukernel_1x8__sse_load1,
+      3 /* mr */, 8 /* nr */);
+  }
+
   static void f32_gemm_4x8__sse_load1(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_f32_gemm_minmax_ukernel_4x8__sse_load1,
@@ -553,6 +566,24 @@
       4 /* mr */, 8 /* nr */);
   }
 
+  static void f32_gemm_5x8__sse_load1(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_5x8__sse_load1,
+      xnn_f32_igemm_minmax_ukernel_5x8__sse_load1,
+      xnn_f32_gemm_minmax_ukernel_1x8__sse_load1,
+      xnn_f32_igemm_minmax_ukernel_1x8__sse_load1,
+      5 /* mr */, 8 /* nr */);
+  }
+
+  static void f32_gemm_3x8__sse_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_3x8__sse_dup,
+      xnn_f32_igemm_minmax_ukernel_3x8__sse_dup,
+      xnn_f32_gemm_minmax_ukernel_1x8__sse_dup,
+      xnn_f32_igemm_minmax_ukernel_1x8__sse_dup,
+      3 /* mr */, 8 /* nr */);
+  }
+
   static void f32_gemm_4x8__sse_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_f32_gemm_minmax_ukernel_4x8__sse_dup,
@@ -562,6 +593,24 @@
       4 /* mr */, 8 /* nr */);
   }
 
+  static void f32_gemm_5x8__sse_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_5x8__sse_dup,
+      xnn_f32_igemm_minmax_ukernel_5x8__sse_dup,
+      xnn_f32_gemm_minmax_ukernel_1x8__sse_dup,
+      xnn_f32_igemm_minmax_ukernel_1x8__sse_dup,
+      5 /* mr */, 8 /* nr */);
+  }
+
+  static void f32_gemm_3x8s4__sse(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_3x8s4__sse,
+      xnn_f32_igemm_minmax_ukernel_3x8s4__sse,
+      xnn_f32_gemm_minmax_ukernel_1x8s4__sse,
+      xnn_f32_igemm_minmax_ukernel_1x8s4__sse,
+      3 /* mr */, 8 /* nr */, 0 /* log2(kr) */, 2 /* log2(sr) */);
+  }
+
   static void f32_gemm_4x8s4__sse(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_f32_gemm_minmax_ukernel_4x8s4__sse,
@@ -571,6 +620,42 @@
       4 /* mr */, 8 /* nr */, 0 /* log2(kr) */, 2 /* log2(sr) */);
   }
 
+  static void f32_gemm_5x8s4__sse(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_5x8s4__sse,
+      xnn_f32_igemm_minmax_ukernel_5x8s4__sse,
+      xnn_f32_gemm_minmax_ukernel_1x8s4__sse,
+      xnn_f32_igemm_minmax_ukernel_1x8s4__sse,
+      5 /* mr */, 8 /* nr */, 0 /* log2(kr) */, 2 /* log2(sr) */);
+  }
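+
+  // Editor's note (hedged): the new *x8s4 registrations pass the packing
+  // parameters as log2 values, 0 for kr and 2 for sr, i.e. kr = 1 and sr = 4,
+  // which matches the "s4" suffix of these shuffle variants. A small sketch of
+  // how a harness like GEMMEnd2EndBenchmark plausibly decodes them (names are
+  // illustrative, not the XNNPACK API, whose signature is not in this hunk):
+  //
+  //   #include <cstdint>
+  //   #include <cstdio>
+  //
+  //   static void describe_gemm_tile(uint32_t mr, uint32_t nr,
+  //                                  uint32_t log2_kr, uint32_t log2_sr) {
+  //     const uint32_t kr = UINT32_C(1) << log2_kr;  // elements per K step
+  //     const uint32_t sr = UINT32_C(1) << log2_sr;  // shuffle factor (s4 => 4)
+  //     std::printf("tile %ux%u, kr=%u, sr=%u\n", mr, nr, kr, sr);
+  //   }
+  //
+  //   int main() {
+  //     // Mirrors the 3x8s4 and 5x8s4 SSE registrations above.
+  //     describe_gemm_tile(3, 8, 0, 2);
+  //     describe_gemm_tile(5, 8, 0, 2);
+  //     return 0;
+  //   }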
+
+  static void f32_gemm_3x8__sse2_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup,
+      xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup,
+      xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup,
+      xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup,
+      3 /* mr */, 8 /* nr */);
+  }
+
+  static void f32_gemm_4x8__sse2_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup,
+      xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup,
+      xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup,
+      xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup,
+      4 /* mr */, 8 /* nr */);
+  }
+
+  static void f32_gemm_5x8__sse2_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup,
+      xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup,
+      xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup,
+      xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup,
+      5 /* mr */, 8 /* nr */);
+  }
+
   static void f32_gemm_4x8__avx_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_f32_gemm_minmax_ukernel_4x8__avx_broadcast,
@@ -801,11 +886,21 @@
       benchmark::utils::CheckAVX512F);
   }
 
+  BENCHMARK_FP32_END2END(f32_gemm_3x8__sse_load1);
   BENCHMARK_FP32_END2END(f32_gemm_4x8__sse_load1);
+  BENCHMARK_FP32_END2END(f32_gemm_5x8__sse_load1);
 
+  BENCHMARK_FP32_END2END(f32_gemm_3x8__sse_dup);
   BENCHMARK_FP32_END2END(f32_gemm_4x8__sse_dup);
+  BENCHMARK_FP32_END2END(f32_gemm_5x8__sse_dup);
 
+  BENCHMARK_FP32_END2END(f32_gemm_3x8s4__sse);
   BENCHMARK_FP32_END2END(f32_gemm_4x8s4__sse);
+  BENCHMARK_FP32_END2END(f32_gemm_5x8s4__sse);
+
+  BENCHMARK_FP32_END2END(f32_gemm_3x8__sse2_dup);
+  BENCHMARK_FP32_END2END(f32_gemm_4x8__sse2_dup);
+  BENCHMARK_FP32_END2END(f32_gemm_5x8__sse2_dup);
 
   BENCHMARK_FP32_END2END(f32_gemm_4x8__avx_broadcast);
   BENCHMARK_FP32_END2END(f32_gemm_5x8__avx_broadcast);
@@ -836,147 +931,147 @@
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 #if XNN_ARCH_WASMSIMD
-  static void f32_gemm_3x8__wasmsimd_loadsplat_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_3x8__wasmsimd_arm_loadsplat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm,
-      xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm,
+      xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat,
       3 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_4x8__wasmsimd_loadsplat_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_4x8__wasmsimd_arm_loadsplat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm,
-      xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm,
+      xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat,
       4 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_5x8__wasmsimd_loadsplat_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_5x8__wasmsimd_arm_loadsplat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm,
-      xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm,
+      xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat,
       5 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_6x8__wasmsimd_loadsplat_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_6x8__wasmsimd_arm_loadsplat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm,
-      xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm,
+      xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat,
       6 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_3x8__wasmsimd_loadsplat_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_3x8__wasmsimd_x86_loadsplat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86,
-      xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86,
+      xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat,
       3 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_4x8__wasmsimd_loadsplat_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_4x8__wasmsimd_x86_loadsplat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86,
-      xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86,
+      xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat,
       4 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_5x8__wasmsimd_loadsplat_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_5x8__wasmsimd_x86_loadsplat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86,
-      xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86,
+      xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat,
       5 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_6x8__wasmsimd_loadsplat_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_6x8__wasmsimd_x86_loadsplat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86,
-      xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86,
+      xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat,
       6 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_3x8__wasmsimd_splat_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_3x8__wasmsimd_arm_splat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm,
-      xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm,
+      xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat,
+      xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat,
       3 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_4x8__wasmsimd_splat_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_4x8__wasmsimd_arm_splat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm,
-      xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm,
+      xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat,
+      xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat,
       4 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_5x8__wasmsimd_splat_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_5x8__wasmsimd_arm_splat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm,
-      xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm,
+      xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat,
+      xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat,
       5 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_6x8__wasmsimd_splat_arm(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_6x8__wasmsimd_arm_splat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm,
-      xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm,
+      xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat,
+      xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat,
       6 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_3x8__wasmsimd_splat_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_3x8__wasmsimd_x86_splat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86,
-      xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86,
+      xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat,
+      xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat,
       3 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_4x8__wasmsimd_splat_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_4x8__wasmsimd_x86_splat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86,
-      xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86,
+      xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat,
+      xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat,
       4 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_5x8__wasmsimd_splat_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_5x8__wasmsimd_x86_splat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86,
-      xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86,
+      xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat,
+      xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat,
       5 /* mr */, 8 /* nr */);
   }
 
-  static void f32_gemm_6x8__wasmsimd_splat_x86(benchmark::State& state, models::ExecutionPlanFactory model) {
+  static void f32_gemm_6x8__wasmsimd_x86_splat(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
-      xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86,
-      xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86,
-      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86,
-      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86,
+      xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat,
+      xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat,
+      xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat,
+      xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat,
       6 /* mr */, 8 /* nr */);
   }
 
@@ -1052,25 +1147,25 @@
       6 /* mr */, 8 /* nr */, 0 /* log2(kr) */, 2 /* log2(sr) */);
   }
 
-  BENCHMARK_FP32_END2END(f32_gemm_3x8__wasmsimd_loadsplat_arm);
-  BENCHMARK_FP32_END2END(f32_gemm_4x8__wasmsimd_loadsplat_arm);
-  BENCHMARK_FP32_END2END(f32_gemm_5x8__wasmsimd_loadsplat_arm);
-  BENCHMARK_FP32_END2END(f32_gemm_6x8__wasmsimd_loadsplat_arm);
+  BENCHMARK_FP32_END2END(f32_gemm_3x8__wasmsimd_arm_loadsplat);
+  BENCHMARK_FP32_END2END(f32_gemm_4x8__wasmsimd_arm_loadsplat);
+  BENCHMARK_FP32_END2END(f32_gemm_5x8__wasmsimd_arm_loadsplat);
+  BENCHMARK_FP32_END2END(f32_gemm_6x8__wasmsimd_arm_loadsplat);
 
-  BENCHMARK_FP32_END2END(f32_gemm_3x8__wasmsimd_loadsplat_x86);
-  BENCHMARK_FP32_END2END(f32_gemm_4x8__wasmsimd_loadsplat_x86);
-  BENCHMARK_FP32_END2END(f32_gemm_5x8__wasmsimd_loadsplat_x86);
-  BENCHMARK_FP32_END2END(f32_gemm_6x8__wasmsimd_loadsplat_x86);
+  BENCHMARK_FP32_END2END(f32_gemm_3x8__wasmsimd_x86_loadsplat);
+  BENCHMARK_FP32_END2END(f32_gemm_4x8__wasmsimd_x86_loadsplat);
+  BENCHMARK_FP32_END2END(f32_gemm_5x8__wasmsimd_x86_loadsplat);
+  BENCHMARK_FP32_END2END(f32_gemm_6x8__wasmsimd_x86_loadsplat);
 
-  BENCHMARK_FP32_END2END(f32_gemm_3x8__wasmsimd_splat_arm);
-  BENCHMARK_FP32_END2END(f32_gemm_4x8__wasmsimd_splat_arm);
-  BENCHMARK_FP32_END2END(f32_gemm_5x8__wasmsimd_splat_arm);
-  BENCHMARK_FP32_END2END(f32_gemm_6x8__wasmsimd_splat_arm);
+  BENCHMARK_FP32_END2END(f32_gemm_3x8__wasmsimd_arm_splat);
+  BENCHMARK_FP32_END2END(f32_gemm_4x8__wasmsimd_arm_splat);
+  BENCHMARK_FP32_END2END(f32_gemm_5x8__wasmsimd_arm_splat);
+  BENCHMARK_FP32_END2END(f32_gemm_6x8__wasmsimd_arm_splat);
 
-  BENCHMARK_FP32_END2END(f32_gemm_3x8__wasmsimd_splat_x86);
-  BENCHMARK_FP32_END2END(f32_gemm_4x8__wasmsimd_splat_x86);
-  BENCHMARK_FP32_END2END(f32_gemm_5x8__wasmsimd_splat_x86);
-  BENCHMARK_FP32_END2END(f32_gemm_6x8__wasmsimd_splat_x86);
+  BENCHMARK_FP32_END2END(f32_gemm_3x8__wasmsimd_x86_splat);
+  BENCHMARK_FP32_END2END(f32_gemm_4x8__wasmsimd_x86_splat);
+  BENCHMARK_FP32_END2END(f32_gemm_5x8__wasmsimd_x86_splat);
+  BENCHMARK_FP32_END2END(f32_gemm_6x8__wasmsimd_x86_splat);
 
   BENCHMARK_FP32_END2END(f32_gemm_3x8s4__wasmsimd_arm);
   BENCHMARK_FP32_END2END(f32_gemm_4x8s4__wasmsimd_arm);
diff --git a/bench/f32-gemm.cc b/bench/f32-gemm.cc
index 9ccbe4b..3f4bde4 100644
--- a/bench/f32-gemm.cc
+++ b/bench/f32-gemm.cc
@@ -15,8 +15,6 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
 #ifdef BENCHMARK_RUY
 #include "ruy/ruy.h"
@@ -38,10 +36,6 @@
   size_t mr, size_t nr, size_t kr, size_t sr,
   benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-    return;
-  }
   if (isa_check && !isa_check(state)) {
     return;
   }
@@ -101,7 +95,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -112,10 +110,6 @@
   size_t mr, size_t nr,
   benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-    return;
-  }
   if (isa_check && !isa_check(state)) {
     return;
   }
@@ -177,7 +171,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -188,10 +186,6 @@
   size_t mr, size_t nr,
   benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-    return;
-  }
   if (isa_check && !isa_check(state)) {
     return;
   }
@@ -257,7 +251,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -335,7 +333,11 @@
     ruy::Mul(ruy_a, ruy_b, mul_params, &context, &ruy_c);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -582,23 +584,54 @@
   static void f32_gemm_1x8__sse_load1(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_1x8__sse_load1, 1, 8, 1, 1);
   }
+  static void f32_gemm_3x8__sse_load1(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__sse_load1, 3, 8, 1, 1);
+  }
   static void f32_gemm_4x8__sse_load1(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__sse_load1, 4, 8, 1, 1);
   }
+  static void f32_gemm_5x8__sse_load1(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__sse_load1, 5, 8, 1, 1);
+  }
 
   static void f32_gemm_1x8__sse_dup(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_1x8__sse_dup, 1, 8, 1, 1);
   }
+  static void f32_gemm_3x8__sse_dup(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__sse_dup, 3, 8, 1, 1);
+  }
   static void f32_gemm_4x8__sse_dup(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__sse_dup, 4, 8, 1, 1);
   }
+  static void f32_gemm_5x8__sse_dup(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__sse_dup, 5, 8, 1, 1);
+  }
 
   static void f32_gemm_1x8s4__sse(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_1x8s4__sse, 1, 8, 1, 4);
   }
+  static void f32_gemm_3x8s4__sse(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8s4__sse, 3, 8, 1, 4);
+  }
   static void f32_gemm_4x8s4__sse(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8s4__sse, 4, 8, 1, 4);
   }
+  static void f32_gemm_5x8s4__sse(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8s4__sse, 5, 8, 1, 4);
+  }
+
+  static void f32_gemm_1x8__sse2_dup(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup, 1, 8, 1, 1);
+  }
+  static void f32_gemm_3x8__sse2_dup(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup, 3, 8, 1, 1);
+  }
+  static void f32_gemm_4x8__sse2_dup(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup, 4, 8, 1, 1);
+  }
+  static void f32_gemm_5x8__sse2_dup(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup, 5, 8, 1, 1);
+  }
 
   static void f32_ppmm_4x8_unipass__sse(benchmark::State& state, const char* net) {
     PPMM1PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__sse, xnn_x32_packx_ukernel_4x__sse, 4, 8);
@@ -699,13 +732,24 @@
   }
 
   BENCHMARK_GEMM(f32_gemm_1x8__sse_load1)
+  BENCHMARK_GEMM(f32_gemm_3x8__sse_load1)
   BENCHMARK_GEMM(f32_gemm_4x8__sse_load1)
+  BENCHMARK_GEMM(f32_gemm_5x8__sse_load1)
 
   BENCHMARK_GEMM(f32_gemm_1x8__sse_dup)
+  BENCHMARK_GEMM(f32_gemm_3x8__sse_dup)
   BENCHMARK_GEMM(f32_gemm_4x8__sse_dup)
+  BENCHMARK_GEMM(f32_gemm_5x8__sse_dup)
 
   BENCHMARK_GEMM(f32_gemm_1x8s4__sse)
+  BENCHMARK_GEMM(f32_gemm_3x8s4__sse)
   BENCHMARK_GEMM(f32_gemm_4x8s4__sse)
+  BENCHMARK_GEMM(f32_gemm_5x8s4__sse)
+
+  BENCHMARK_GEMM(f32_gemm_1x8__sse2_dup)
+  BENCHMARK_GEMM(f32_gemm_3x8__sse2_dup)
+  BENCHMARK_GEMM(f32_gemm_4x8__sse2_dup)
+  BENCHMARK_GEMM(f32_gemm_5x8__sse2_dup)
 
   BENCHMARK_GEMM(f32_ppmm_4x8_unipass__sse)
   BENCHMARK_GEMM(f32_ppmm_4x8_twopass__sse)
@@ -745,68 +789,68 @@
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 #if XNN_ARCH_WASMSIMD
-  static void f32_gemm_3x8__wasmsimd_loadsplat_arm(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm, 3, 8, 1, 1);
+  static void f32_gemm_3x8__wasmsimd_arm_loadsplat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat, 3, 8, 1, 1);
   }
 
-  static void f32_gemm_4x8__wasmsimd_loadsplat_arm(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm, 4, 8, 1, 1);
+  static void f32_gemm_4x8__wasmsimd_arm_loadsplat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat, 4, 8, 1, 1);
   }
 
-  static void f32_gemm_5x8__wasmsimd_loadsplat_arm(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm, 5, 8, 1, 1);
+  static void f32_gemm_5x8__wasmsimd_arm_loadsplat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat, 5, 8, 1, 1);
   }
 
-  static void f32_gemm_6x8__wasmsimd_loadsplat_arm(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm, 6, 8, 1, 1);
+  static void f32_gemm_6x8__wasmsimd_arm_loadsplat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat, 6, 8, 1, 1);
   }
 
-  static void f32_gemm_3x8__wasmsimd_loadsplat_x86(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86, 3, 8, 1, 1);
+  static void f32_gemm_3x8__wasmsimd_x86_loadsplat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat, 3, 8, 1, 1);
   }
 
-  static void f32_gemm_4x8__wasmsimd_loadsplat_x86(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86, 4, 8, 1, 1);
+  static void f32_gemm_4x8__wasmsimd_x86_loadsplat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat, 4, 8, 1, 1);
   }
 
-  static void f32_gemm_5x8__wasmsimd_loadsplat_x86(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86, 5, 8, 1, 1);
+  static void f32_gemm_5x8__wasmsimd_x86_loadsplat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat, 5, 8, 1, 1);
   }
 
-  static void f32_gemm_6x8__wasmsimd_loadsplat_x86(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86, 6, 8, 1, 1);
+  static void f32_gemm_6x8__wasmsimd_x86_loadsplat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat, 6, 8, 1, 1);
   }
 
-  static void f32_gemm_3x8__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm, 3, 8, 1, 1);
+  static void f32_gemm_3x8__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat, 3, 8, 1, 1);
   }
 
-  static void f32_gemm_4x8__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm, 4, 8, 1, 1);
+  static void f32_gemm_4x8__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat, 4, 8, 1, 1);
   }
 
-  static void f32_gemm_5x8__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm, 5, 8, 1, 1);
+  static void f32_gemm_5x8__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat, 5, 8, 1, 1);
   }
 
-  static void f32_gemm_6x8__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm, 6, 8, 1, 1);
+  static void f32_gemm_6x8__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat, 6, 8, 1, 1);
   }
 
-  static void f32_gemm_3x8__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86, 3, 8, 1, 1);
+  static void f32_gemm_3x8__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat, 3, 8, 1, 1);
   }
 
-  static void f32_gemm_4x8__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86, 4, 8, 1, 1);
+  static void f32_gemm_4x8__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat, 4, 8, 1, 1);
   }
 
-  static void f32_gemm_5x8__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86, 5, 8, 1, 1);
+  static void f32_gemm_5x8__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat, 5, 8, 1, 1);
   }
 
-  static void f32_gemm_6x8__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86, 6, 8, 1, 1);
+  static void f32_gemm_6x8__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat, 6, 8, 1, 1);
   }
 
   static void f32_gemm_3x8s4__wasmsimd_arm(benchmark::State& state, const char* net) {
@@ -841,36 +885,36 @@
     GEMMBenchmark(state, xnn_f32_gemm_minmax_ukernel_6x8s4__wasmsimd_x86, 6, 8, 1, 4);
   }
 
-  static void f32_ppmm_4x8_unipass__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    PPMM1PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm, xnn_x32_packx_ukernel_4x__wasmsimd, 4, 8);
+  static void f32_ppmm_4x8_unipass__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    PPMM1PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat, xnn_x32_packx_ukernel_4x__wasmsimd, 4, 8);
   }
-  static void f32_ppmm_4x8_unipass__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    PPMM1PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86, xnn_x32_packx_ukernel_4x__wasmsimd, 4, 8);
+  static void f32_ppmm_4x8_unipass__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    PPMM1PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat, xnn_x32_packx_ukernel_4x__wasmsimd, 4, 8);
   }
 
-  static void f32_ppmm_4x8_twopass__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    PPMM2PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm, xnn_x32_packx_ukernel_4x__wasmsimd, 4, 8);
+  static void f32_ppmm_4x8_twopass__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    PPMM2PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat, xnn_x32_packx_ukernel_4x__wasmsimd, 4, 8);
   }
-  static void f32_ppmm_4x8_twopass__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    PPMM2PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86, xnn_x32_packx_ukernel_4x__wasmsimd, 4, 8);
+  static void f32_ppmm_4x8_twopass__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    PPMM2PBenchmark(state, xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat, xnn_x32_packx_ukernel_4x__wasmsimd, 4, 8);
   }
 
-  BENCHMARK_GEMM(f32_gemm_3x8__wasmsimd_loadsplat_arm)
-  BENCHMARK_GEMM(f32_gemm_4x8__wasmsimd_loadsplat_arm)
-  BENCHMARK_GEMM(f32_gemm_5x8__wasmsimd_loadsplat_arm)
-  BENCHMARK_GEMM(f32_gemm_6x8__wasmsimd_loadsplat_arm)
-  BENCHMARK_GEMM(f32_gemm_3x8__wasmsimd_loadsplat_x86)
-  BENCHMARK_GEMM(f32_gemm_4x8__wasmsimd_loadsplat_x86)
-  BENCHMARK_GEMM(f32_gemm_5x8__wasmsimd_loadsplat_x86)
-  BENCHMARK_GEMM(f32_gemm_6x8__wasmsimd_loadsplat_x86)
-  BENCHMARK_GEMM(f32_gemm_3x8__wasmsimd_splat_arm)
-  BENCHMARK_GEMM(f32_gemm_4x8__wasmsimd_splat_arm)
-  BENCHMARK_GEMM(f32_gemm_5x8__wasmsimd_splat_arm)
-  BENCHMARK_GEMM(f32_gemm_6x8__wasmsimd_splat_arm)
-  BENCHMARK_GEMM(f32_gemm_3x8__wasmsimd_splat_x86)
-  BENCHMARK_GEMM(f32_gemm_4x8__wasmsimd_splat_x86)
-  BENCHMARK_GEMM(f32_gemm_5x8__wasmsimd_splat_x86)
-  BENCHMARK_GEMM(f32_gemm_6x8__wasmsimd_splat_x86)
+  BENCHMARK_GEMM(f32_gemm_3x8__wasmsimd_arm_loadsplat)
+  BENCHMARK_GEMM(f32_gemm_4x8__wasmsimd_arm_loadsplat)
+  BENCHMARK_GEMM(f32_gemm_5x8__wasmsimd_arm_loadsplat)
+  BENCHMARK_GEMM(f32_gemm_6x8__wasmsimd_arm_loadsplat)
+  BENCHMARK_GEMM(f32_gemm_3x8__wasmsimd_x86_loadsplat)
+  BENCHMARK_GEMM(f32_gemm_4x8__wasmsimd_x86_loadsplat)
+  BENCHMARK_GEMM(f32_gemm_5x8__wasmsimd_x86_loadsplat)
+  BENCHMARK_GEMM(f32_gemm_6x8__wasmsimd_x86_loadsplat)
+  BENCHMARK_GEMM(f32_gemm_3x8__wasmsimd_arm_splat)
+  BENCHMARK_GEMM(f32_gemm_4x8__wasmsimd_arm_splat)
+  BENCHMARK_GEMM(f32_gemm_5x8__wasmsimd_arm_splat)
+  BENCHMARK_GEMM(f32_gemm_6x8__wasmsimd_arm_splat)
+  BENCHMARK_GEMM(f32_gemm_3x8__wasmsimd_x86_splat)
+  BENCHMARK_GEMM(f32_gemm_4x8__wasmsimd_x86_splat)
+  BENCHMARK_GEMM(f32_gemm_5x8__wasmsimd_x86_splat)
+  BENCHMARK_GEMM(f32_gemm_6x8__wasmsimd_x86_splat)
   BENCHMARK_GEMM(f32_gemm_3x8s4__wasmsimd_arm)
   BENCHMARK_GEMM(f32_gemm_4x8s4__wasmsimd_arm)
   BENCHMARK_GEMM(f32_gemm_5x8s4__wasmsimd_arm)
@@ -879,10 +923,10 @@
   BENCHMARK_GEMM(f32_gemm_4x8s4__wasmsimd_x86)
   BENCHMARK_GEMM(f32_gemm_5x8s4__wasmsimd_x86)
   BENCHMARK_GEMM(f32_gemm_6x8s4__wasmsimd_x86)
-  BENCHMARK_GEMM(f32_ppmm_4x8_unipass__wasmsimd_splat_arm)
-  BENCHMARK_GEMM(f32_ppmm_4x8_unipass__wasmsimd_splat_x86)
-  BENCHMARK_GEMM(f32_ppmm_4x8_twopass__wasmsimd_splat_arm)
-  BENCHMARK_GEMM(f32_ppmm_4x8_twopass__wasmsimd_splat_x86)
+  BENCHMARK_GEMM(f32_ppmm_4x8_unipass__wasmsimd_arm_splat)
+  BENCHMARK_GEMM(f32_ppmm_4x8_unipass__wasmsimd_x86_splat)
+  BENCHMARK_GEMM(f32_ppmm_4x8_twopass__wasmsimd_arm_splat)
+  BENCHMARK_GEMM(f32_ppmm_4x8_twopass__wasmsimd_x86_splat)
 #endif  // XNN_ARCH_WASMSIMD
 
 static void f32_gemm_1x4__scalar(benchmark::State& state, const char* net) {
diff --git a/bench/f32-igemm.cc b/bench/f32-igemm.cc
index 2a961fa..65c2f8e 100644
--- a/bench/f32-igemm.cc
+++ b/bench/f32-igemm.cc
@@ -10,8 +10,6 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
 #include "bench/conv.h"
 #include "bench/utils.h"
@@ -30,9 +28,6 @@
   uint32_t mr, uint32_t nr, uint32_t kr, uint32_t sr,
   benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-  }
   if (isa_check && !isa_check(state)) {
     return;
   }
@@ -145,7 +140,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       output_height * output_width *
@@ -408,23 +407,54 @@
   static void f32_igemm_1x8__sse_load1(benchmark::State& state, const char* net) {
     IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_1x8__sse_load1, 1, 8, 1, 1);
   }
+  static void f32_igemm_3x8__sse_load1(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__sse_load1, 3, 8, 1, 1);
+  }
   static void f32_igemm_4x8__sse_load1(benchmark::State& state, const char* net) {
     IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__sse_load1, 4, 8, 1, 1);
   }
+  static void f32_igemm_5x8__sse_load1(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__sse_load1, 5, 8, 1, 1);
+  }
 
   static void f32_igemm_1x8__sse_dup(benchmark::State& state, const char* net) {
     IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_1x8__sse_dup, 1, 8, 1, 1);
   }
+  static void f32_igemm_3x8__sse_dup(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__sse_dup, 3, 8, 1, 1);
+  }
   static void f32_igemm_4x8__sse_dup(benchmark::State& state, const char* net) {
     IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__sse_dup, 4, 8, 1, 1);
   }
+  static void f32_igemm_5x8__sse_dup(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__sse_dup, 5, 8, 1, 1);
+  }
 
   static void f32_igemm_1x8s4__sse(benchmark::State& state, const char* net) {
     IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_1x8s4__sse, 1, 8, 1, 4);
   }
+  static void f32_igemm_3x8s4__sse(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8s4__sse, 3, 8, 1, 4);
+  }
   static void f32_igemm_4x8s4__sse(benchmark::State& state, const char* net) {
     IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8s4__sse, 4, 8, 1, 4);
   }
+  static void f32_igemm_5x8s4__sse(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8s4__sse, 5, 8, 1, 4);
+  }
+
+  static void f32_igemm_1x8__sse2_dup(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup, 1, 8, 1, 1);
+  }
+  static void f32_igemm_3x8__sse2_dup(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup, 3, 8, 1, 1);
+  }
+  static void f32_igemm_4x8__sse2_dup(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup, 4, 8, 1, 1);
+  }
+  static void f32_igemm_5x8__sse2_dup(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup, 5, 8, 1, 1);
+  }
 
   static void f32_igemm_1x8__avx_broadcast(benchmark::State& state, const char* net) {
     IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_1x8__avx_broadcast, 1, 8, 1, 1, benchmark::utils::CheckAVX);
@@ -481,13 +511,24 @@
   }
 
   BENCHMARK_CONV(f32_igemm_1x8__sse_load1)
+  BENCHMARK_CONV(f32_igemm_3x8__sse_load1)
   BENCHMARK_CONV(f32_igemm_4x8__sse_load1)
+  BENCHMARK_CONV(f32_igemm_5x8__sse_load1)
 
   BENCHMARK_CONV(f32_igemm_1x8__sse_dup)
+  BENCHMARK_CONV(f32_igemm_3x8__sse_dup)
   BENCHMARK_CONV(f32_igemm_4x8__sse_dup)
+  BENCHMARK_CONV(f32_igemm_5x8__sse_dup)
 
   BENCHMARK_CONV(f32_igemm_1x8s4__sse)
+  BENCHMARK_CONV(f32_igemm_3x8s4__sse)
   BENCHMARK_CONV(f32_igemm_4x8s4__sse)
+  BENCHMARK_CONV(f32_igemm_5x8s4__sse)
+
+  BENCHMARK_CONV(f32_igemm_1x8__sse2_dup)
+  BENCHMARK_CONV(f32_igemm_3x8__sse2_dup)
+  BENCHMARK_CONV(f32_igemm_4x8__sse2_dup)
+  BENCHMARK_CONV(f32_igemm_5x8__sse2_dup)
 
   BENCHMARK_CONV(f32_igemm_1x8__avx_broadcast)
   BENCHMARK_CONV(f32_igemm_4x8__avx_broadcast)
@@ -511,68 +552,68 @@
 #endif  /* XNN_ARCH_X86 || XNN_ARCH_X86_64 */
 
 #if XNN_ARCH_WASMSIMD
-  static void f32_igemm_3x8__wasmsimd_loadsplat_arm(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm, 3, 8, 1, 1);
+  static void f32_igemm_3x8__wasmsimd_arm_loadsplat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat, 3, 8, 1, 1);
   }
 
-  static void f32_igemm_4x8__wasmsimd_loadsplat_arm(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm, 4, 8, 1, 1);
+  static void f32_igemm_4x8__wasmsimd_arm_loadsplat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat, 4, 8, 1, 1);
   }
 
-  static void f32_igemm_5x8__wasmsimd_loadsplat_arm(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm, 5, 8, 1, 1);
+  static void f32_igemm_5x8__wasmsimd_arm_loadsplat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat, 5, 8, 1, 1);
   }
 
-  static void f32_igemm_6x8__wasmsimd_loadsplat_arm(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm, 6, 8, 1, 1);
+  static void f32_igemm_6x8__wasmsimd_arm_loadsplat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat, 6, 8, 1, 1);
   }
 
-  static void f32_igemm_3x8__wasmsimd_loadsplat_x86(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86, 3, 8, 1, 1);
+  static void f32_igemm_3x8__wasmsimd_x86_loadsplat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat, 3, 8, 1, 1);
   }
 
-  static void f32_igemm_4x8__wasmsimd_loadsplat_x86(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86, 4, 8, 1, 1);
+  static void f32_igemm_4x8__wasmsimd_x86_loadsplat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat, 4, 8, 1, 1);
   }
 
-  static void f32_igemm_5x8__wasmsimd_loadsplat_x86(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86, 5, 8, 1, 1);
+  static void f32_igemm_5x8__wasmsimd_x86_loadsplat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat, 5, 8, 1, 1);
   }
 
-  static void f32_igemm_6x8__wasmsimd_loadsplat_x86(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86, 6, 8, 1, 1);
+  static void f32_igemm_6x8__wasmsimd_x86_loadsplat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat, 6, 8, 1, 1);
   }
 
-  static void f32_igemm_3x8__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm, 3, 8, 1, 1);
+  static void f32_igemm_3x8__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat, 3, 8, 1, 1);
   }
 
-  static void f32_igemm_4x8__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm, 4, 8, 1, 1);
+  static void f32_igemm_4x8__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat, 4, 8, 1, 1);
   }
 
-  static void f32_igemm_5x8__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm, 5, 8, 1, 1);
+  static void f32_igemm_5x8__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat, 5, 8, 1, 1);
   }
 
-  static void f32_igemm_6x8__wasmsimd_splat_arm(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm, 6, 8, 1, 1);
+  static void f32_igemm_6x8__wasmsimd_arm_splat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat, 6, 8, 1, 1);
   }
 
-  static void f32_igemm_3x8__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86, 3, 8, 1, 1);
+  static void f32_igemm_3x8__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat, 3, 8, 1, 1);
   }
 
-  static void f32_igemm_4x8__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86, 4, 8, 1, 1);
+  static void f32_igemm_4x8__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat, 4, 8, 1, 1);
   }
 
-  static void f32_igemm_5x8__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86, 5, 8, 1, 1);
+  static void f32_igemm_5x8__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat, 5, 8, 1, 1);
   }
 
-  static void f32_igemm_6x8__wasmsimd_splat_x86(benchmark::State& state, const char* net) {
-    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86, 6, 8, 1, 1);
+  static void f32_igemm_6x8__wasmsimd_x86_splat(benchmark::State& state, const char* net) {
+    IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat, 6, 8, 1, 1);
   }
 
   static void f32_igemm_3x8s4__wasmsimd_arm(benchmark::State& state, const char* net) {
@@ -607,22 +648,22 @@
     IGEMMBenchmark(state, xnn_f32_igemm_minmax_ukernel_6x8s4__wasmsimd_x86, 6, 8, 1, 4);
   }
 
-  BENCHMARK_CONV(f32_igemm_3x8__wasmsimd_loadsplat_arm)
-  BENCHMARK_CONV(f32_igemm_4x8__wasmsimd_loadsplat_arm)
-  BENCHMARK_CONV(f32_igemm_5x8__wasmsimd_loadsplat_arm)
-  BENCHMARK_CONV(f32_igemm_6x8__wasmsimd_loadsplat_arm)
-  BENCHMARK_CONV(f32_igemm_3x8__wasmsimd_loadsplat_x86)
-  BENCHMARK_CONV(f32_igemm_4x8__wasmsimd_loadsplat_x86)
-  BENCHMARK_CONV(f32_igemm_5x8__wasmsimd_loadsplat_x86)
-  BENCHMARK_CONV(f32_igemm_6x8__wasmsimd_loadsplat_x86)
-  BENCHMARK_CONV(f32_igemm_3x8__wasmsimd_splat_arm)
-  BENCHMARK_CONV(f32_igemm_4x8__wasmsimd_splat_arm)
-  BENCHMARK_CONV(f32_igemm_5x8__wasmsimd_splat_arm)
-  BENCHMARK_CONV(f32_igemm_6x8__wasmsimd_splat_arm)
-  BENCHMARK_CONV(f32_igemm_3x8__wasmsimd_splat_x86)
-  BENCHMARK_CONV(f32_igemm_4x8__wasmsimd_splat_x86)
-  BENCHMARK_CONV(f32_igemm_5x8__wasmsimd_splat_x86)
-  BENCHMARK_CONV(f32_igemm_6x8__wasmsimd_splat_x86)
+  BENCHMARK_CONV(f32_igemm_3x8__wasmsimd_arm_loadsplat)
+  BENCHMARK_CONV(f32_igemm_4x8__wasmsimd_arm_loadsplat)
+  BENCHMARK_CONV(f32_igemm_5x8__wasmsimd_arm_loadsplat)
+  BENCHMARK_CONV(f32_igemm_6x8__wasmsimd_arm_loadsplat)
+  BENCHMARK_CONV(f32_igemm_3x8__wasmsimd_x86_loadsplat)
+  BENCHMARK_CONV(f32_igemm_4x8__wasmsimd_x86_loadsplat)
+  BENCHMARK_CONV(f32_igemm_5x8__wasmsimd_x86_loadsplat)
+  BENCHMARK_CONV(f32_igemm_6x8__wasmsimd_x86_loadsplat)
+  BENCHMARK_CONV(f32_igemm_3x8__wasmsimd_arm_splat)
+  BENCHMARK_CONV(f32_igemm_4x8__wasmsimd_arm_splat)
+  BENCHMARK_CONV(f32_igemm_5x8__wasmsimd_arm_splat)
+  BENCHMARK_CONV(f32_igemm_6x8__wasmsimd_arm_splat)
+  BENCHMARK_CONV(f32_igemm_3x8__wasmsimd_x86_splat)
+  BENCHMARK_CONV(f32_igemm_4x8__wasmsimd_x86_splat)
+  BENCHMARK_CONV(f32_igemm_5x8__wasmsimd_x86_splat)
+  BENCHMARK_CONV(f32_igemm_6x8__wasmsimd_x86_splat)
   BENCHMARK_CONV(f32_igemm_3x8s4__wasmsimd_arm)
   BENCHMARK_CONV(f32_igemm_4x8s4__wasmsimd_arm)
   BENCHMARK_CONV(f32_igemm_5x8s4__wasmsimd_arm)
diff --git a/bench/f32-im2col-gemm.cc b/bench/f32-im2col-gemm.cc
index 4843925..2c98527 100644
--- a/bench/f32-im2col-gemm.cc
+++ b/bench/f32-im2col-gemm.cc
@@ -10,8 +10,6 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
 #include "bench/conv.h"
 #include "bench/utils.h"
@@ -26,10 +24,10 @@
 
 static void Im2ColGEMMBenchmark(benchmark::State& state,
   xnn_f32_gemm_minmax_ukernel_function f32_gemm,
-  uint32_t mr, uint32_t nr, uint32_t kr, uint32_t sr)
+  uint32_t mr, uint32_t nr, uint32_t kr, uint32_t sr,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
+  if (isa_check && !isa_check(state)) {
     return;
   }
 
@@ -124,7 +122,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 *
       output_height * output_width *
diff --git a/bench/f32-raddexpminusmax.cc b/bench/f32-raddexpminusmax.cc
index f7ef890..50d4633 100644
--- a/bench/f32-raddexpminusmax.cc
+++ b/bench/f32-raddexpminusmax.cc
@@ -28,17 +28,17 @@
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_elements = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-1000.0f, 1000.0f), std::ref(rng));
 
   const size_t num_buffers = 1 +
-    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float, AlignedAllocator<float, 64>> x(n);
+    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_elements * sizeof(float));
+  std::vector<float, AlignedAllocator<float, 64>> x(elements);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
@@ -48,21 +48,28 @@
   for (auto _ : state) {
     state.PauseTiming();
     float x_max = nanf("");
-    rmax(n * sizeof(float), x.data(), &x_max);
+    rmax(elements * sizeof(float), x.data(), &x_max);
     if (++buffer_index == num_buffers) {
       buffer_index = 0;
     }
     state.ResumeTiming();
 
     float y_sum = nanf("");
-    raddexpminusmax(n * sizeof(float), x.data(), &y_sum, x_max);
+    raddexpminusmax(elements * sizeof(float), x.data(), &y_sum, x_max);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 static void CharacteristicArguments(benchmark::internal::Benchmark* b) {
diff --git a/bench/f32-raddextexp.cc b/bench/f32-raddextexp.cc
index 85965e0..6196e01 100644
--- a/bench/f32-raddextexp.cc
+++ b/bench/f32-raddextexp.cc
@@ -26,17 +26,17 @@
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_elements = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-1000.0f, 1000.0f), std::ref(rng));
 
   const size_t num_buffers = 1 +
-    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float, AlignedAllocator<float, 64>> x(n);
+    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_elements * sizeof(float));
+  std::vector<float, AlignedAllocator<float, 64>> x(elements);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
@@ -49,14 +49,21 @@
     }
 
     float y_sum[2] = { nanf(""), nanf("") };
-    raddextexp(n * sizeof(float), x.data(), y_sum);
+    raddextexp(elements * sizeof(float), x.data(), y_sum);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 static void CharacteristicArguments(benchmark::internal::Benchmark* b) {
diff --git a/bench/f32-raddstoreexpminusmax.cc b/bench/f32-raddstoreexpminusmax.cc
index 96da46f..c5fe76d 100644
--- a/bench/f32-raddstoreexpminusmax.cc
+++ b/bench/f32-raddstoreexpminusmax.cc
@@ -28,18 +28,18 @@
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_elements = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-1000.0f, 1000.0f), std::ref(rng));
 
   const size_t num_buffers = 1 +
-    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float, AlignedAllocator<float, 64>> x(n);
-  std::vector<float, AlignedAllocator<float, 64>> y(packed_n * num_buffers);
+    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_elements * sizeof(float));
+  std::vector<float, AlignedAllocator<float, 64>> x(elements);
+  std::vector<float, AlignedAllocator<float, 64>> y(packed_elements * num_buffers);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
@@ -49,21 +49,28 @@
   for (auto _ : state) {
     state.PauseTiming();
     float x_max = nanf("");
-    rmax(n * sizeof(float), x.data(), &x_max);
+    rmax(elements * sizeof(float), x.data(), &x_max);
     if (++buffer_index == num_buffers) {
       buffer_index = 0;
     }
     state.ResumeTiming();
 
     float y_sum = nanf("");
-    raddstoreexpminusmax(n * sizeof(float), x.data(), y.data() + buffer_index * packed_n, &y_sum, x_max);
+    raddstoreexpminusmax(elements * sizeof(float), x.data(), y.data() + buffer_index * packed_elements, &y_sum, x_max);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * 2 * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 static void CharacteristicArguments(benchmark::internal::Benchmark* b) {
diff --git a/bench/f32-relu.cc b/bench/f32-relu.cc
index 7e30296..34d979b 100644
--- a/bench/f32-relu.cc
+++ b/bench/f32-relu.cc
@@ -19,30 +19,40 @@
 
 static void f32_relu(
   benchmark::State& state,
-  xnn_f32_relu_ukernel_function f32_relu)
+  xnn_f32_relu_ukernel_function f32_relu,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  const size_t n = state.range(0);
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  const size_t elements = state.range(0);
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-10.0f, 10.0f), std::ref(rng));
 
-  std::vector<float, AlignedAllocator<float, 64>> x(n);
+  std::vector<float, AlignedAllocator<float, 64>> x(elements);
   std::generate(x.begin(), x.end(), std::ref(f32rng));
-  std::vector<float, AlignedAllocator<float, 64>> y(n);
+  std::vector<float, AlignedAllocator<float, 64>> y(elements);
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
   for (auto _ : state) {
-    f32_relu(n * sizeof(float), x.data(), y.data(), NULL);
+    f32_relu(elements * sizeof(float), x.data(), y.data(), NULL);
   }
 
-    state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
-    state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+  const size_t elements_per_iteration = elements;
+  state.counters["elements"] =
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
 
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n * sizeof(float), benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
@@ -56,33 +66,33 @@
     ->Range(1000, 100000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_relu, avx_x8, xnn_f32_relu_ukernel__avx_x8)
+  BENCHMARK_CAPTURE(f32_relu, avx_x8, xnn_f32_relu_ukernel__avx_x8, benchmark::utils::CheckAVX)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_relu, avx_x16, xnn_f32_relu_ukernel__avx_x16)
+  BENCHMARK_CAPTURE(f32_relu, avx_x16, xnn_f32_relu_ukernel__avx_x16, benchmark::utils::CheckAVX)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_relu, avx512f_x16, xnn_f32_relu_ukernel__avx512f_x16)
+  BENCHMARK_CAPTURE(f32_relu, avx512f_x16, xnn_f32_relu_ukernel__avx512f_x16, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_relu, avx512f_x32, xnn_f32_relu_ukernel__avx512f_x32)
+  BENCHMARK_CAPTURE(f32_relu, avx512f_x32, xnn_f32_relu_ukernel__avx512f_x32, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  BENCHMARK_CAPTURE(f32_relu, neon_x4, xnn_f32_relu_ukernel__neon_x4)
+  BENCHMARK_CAPTURE(f32_relu, neon_x4, xnn_f32_relu_ukernel__neon_x4, benchmark::utils::CheckNEON)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_relu, neon_x8, xnn_f32_relu_ukernel__neon_x8)
+  BENCHMARK_CAPTURE(f32_relu, neon_x8, xnn_f32_relu_ukernel__neon_x8, benchmark::utils::CheckNEON)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
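
Each BENCHMARK_CAPTURE above now forwards an optional ISA predicate, so kernels for unsupported extensions are skipped rather than executed. A hedged sketch of what such a predicate could look like, assuming the cpuinfo API; the real helpers (benchmark::utils::CheckNEON, CheckAVX, ...) live in bench/utils.cc and may differ in detail:

#include <cpuinfo.h>
#include <benchmark/benchmark.h>

// Returns false and marks the benchmark as skipped when NEON is unavailable;
// the benchmark body then returns early, as in the functions above.
static bool ExampleCheckNEON(benchmark::State& state) {
  if (!cpuinfo_initialize() || !cpuinfo_has_arm_neon()) {
    state.SkipWithError("no NEON extension");
    return false;
  }
  return true;
}

A predicate of this shape is what gets passed as the trailing benchmark::utils::IsaCheckFunction argument in the registrations above.
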
diff --git a/bench/f32-rmax.cc b/bench/f32-rmax.cc
index 521c15c..47995d4 100644
--- a/bench/f32-rmax.cc
+++ b/bench/f32-rmax.cc
@@ -19,29 +19,39 @@
 
 static void f32_rmax(
   benchmark::State& state,
-  xnn_f32_rmax_ukernel_function f32_rmax)
+  xnn_f32_rmax_ukernel_function f32_rmax,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  const size_t n = state.range(0);
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  const size_t elements = state.range(0);
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-10.0f, 10.0f), std::ref(rng));
 
-  std::vector<float, AlignedAllocator<float, 64>> x(n);
+  std::vector<float, AlignedAllocator<float, 64>> x(elements);
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
   float y;
   for (auto _ : state) {
-    f32_rmax(n * sizeof(float), x.data(), &y);
+    f32_rmax(elements * sizeof(float), x.data(), &y);
   }
 
-    state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
-    state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+  const size_t elements_per_iteration = elements;
+  state.counters["elements"] =
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
 
+  const size_t bytes_per_iteration = elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n * sizeof(float), benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
@@ -50,19 +60,19 @@
     ->Range(1000, 100000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_rmax, avx, xnn_f32_rmax_ukernel__avx)
+  BENCHMARK_CAPTURE(f32_rmax, avx, xnn_f32_rmax_ukernel__avx, benchmark::utils::CheckAVX)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_rmax, avx512f, xnn_f32_rmax_ukernel__avx512f)
+  BENCHMARK_CAPTURE(f32_rmax, avx512f, xnn_f32_rmax_ukernel__avx512f, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  BENCHMARK_CAPTURE(f32_rmax, neon, xnn_f32_rmax_ukernel__neon)
+  BENCHMARK_CAPTURE(f32_rmax, neon, xnn_f32_rmax_ukernel__neon, benchmark::utils::CheckNEON)
     ->RangeMultiplier(10)
     ->Range(1000, 100000000)
     ->UseRealTime();
diff --git a/bench/f32-sigmoid.cc b/bench/f32-sigmoid.cc
index 86c7e84..76cf5e3 100644
--- a/bench/f32-sigmoid.cc
+++ b/bench/f32-sigmoid.cc
@@ -19,8 +19,13 @@
 
 static void f32_sigmoid(
   benchmark::State& state,
-  xnn_f32_vunary_ukernel_function sigmoid)
+  xnn_f32_vunary_ukernel_function sigmoid,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
   const size_t elements = state.range(0);
 
   std::random_device random_device;
@@ -36,7 +41,10 @@
     sigmoid(elements * sizeof(float), x.data(), y.data(), nullptr /* params */);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = elements;
   state.counters["elements"] =
@@ -125,11 +133,6 @@
 #endif  // XNN_ARCH_ARM64
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  BENCHMARK_CAPTURE(f32_sigmoid, neon_frac_p9_p10_nr1recps_x16, xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x16)
-    ->RangeMultiplier(10)
-    ->Range(1000, 1000000)
-    ->UseRealTime();
-
   BENCHMARK_CAPTURE(f32_sigmoid, neonfma_rr1_p5_nr2fma_x4, xnn_f32_sigmoid_ukernel__neonfma_rr1_p5_nr2fma_x4)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
@@ -432,454 +435,454 @@
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x16, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x16, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x16, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x32, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x32, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x48, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x48, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x48, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x64, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x64, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x64, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x80, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x80, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x80, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x96, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x96)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x96, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x96, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x112, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x112)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x112, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x112, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x128, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x128)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_div_x128, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x128, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x16, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x16, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x16, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x32, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x32, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x32, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x48, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x48, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x48, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x64, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x64, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x64, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x80, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x80, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x80, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x96, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x96)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x96, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x96, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x112, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x112)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x112, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x112, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x128, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x128)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_p5_scalef_nr1fma_x128, xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x128, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x16, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x16, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x16, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x32, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x32, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x32, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x48, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x48, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x48, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x64, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x64, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x64, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x80, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x80, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x80, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x96, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x96)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x96, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x96, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x112, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x112)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x112, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x112, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x128, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x128)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_div_x128, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x128, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x16, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x16, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x16, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x32, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x32, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x32, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x48, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x48, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x48, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x64, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x64, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x64, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x80, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x80, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x80, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x96, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x96)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x96, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x96, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x112, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x112)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x112, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x112, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x128, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x128)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut16_p3_perm_scalef_nr1fma_x128, xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x128, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x16, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x16, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x16, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x32, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x32, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x32, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x48, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x48, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x48, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x64, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x64, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x64, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x80, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x80, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x80, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x96, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x96)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x96, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x96, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x112, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x112)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x112, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x112, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x128, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x128)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_div_x128, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x128, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x16, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x16, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x16, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x32, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x32, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x32, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x48, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x48, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x48, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x64, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x64, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x64, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x80, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x80, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x80, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x96, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x96)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x96, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x96, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x112, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x112)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x112, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x112, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x128, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x128)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx512f_lut32_p2_perm2_scalef_nr1fma_x128, xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x128, benchmark::utils::CheckAVX512F)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x8, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x8)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x16, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x16)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x24, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x24)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x32, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x32)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x40, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x40)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x48, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x48)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x56, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x56)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x64, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x64)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x72, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x72)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x80, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x80)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x8, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x8)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x16, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x16)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x24, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x24)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x32, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x32)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x40, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x40)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x48, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x48)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x56, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x56)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x64, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x64)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x72, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x72)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x80, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80)
-      ->RangeMultiplier(10)
-      ->Range(1000, 1000000)
-      ->UseRealTime();
-
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x8, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x8)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x8, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x8, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x16, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x16, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x24, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x24)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x24, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x24, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x32, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x32, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x32, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x40, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x40)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x40, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x40, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x48, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x48, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x48, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x56, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x56)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x56, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x56, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x64, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x64, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x64, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x72, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x72)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x72, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x72, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x80, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_div_x80, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x80, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x8, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x8)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x8, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x8, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x16, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x16, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x16, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x24, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x24)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x24, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x24, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x32, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x32, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x32, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x40, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x40)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x40, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x40, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x48, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x48, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x48, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x56, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x56)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x56, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x56, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x64, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x64, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x64, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x72, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x72)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x72, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x72, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x80, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr1fma_x80, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x80, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x8, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x8)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x8, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x8, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x16, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x16, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x16, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x24, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x24)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x24, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x24, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x32, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x32, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x40, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x40, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x48, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x48, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x56, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x56, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x64, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x64)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x64, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x64, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x72, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x72)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x72, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x72, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x80, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x80)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx2_p5_nr2fma_x80, xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x80, benchmark::utils::CheckAVX2)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x4, xnn_f32_sigmoid_ukernel__sse41_p5_div_x4)
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x8, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x8, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x16, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x16, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x24, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x24, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x32, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x32, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x40, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x40, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x48, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x48, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x56, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x56, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x64, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x64, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x72, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x72, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_div_x80, xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x80, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x8, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x8, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x16, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x16, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x24, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x24, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x32, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x32, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x40, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x40, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x48, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x48, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x56, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x56, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x64, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x64, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x72, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x72, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_sigmoid, avx_p5_nr2_x80, xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80, benchmark::utils::CheckAVX)
+      ->RangeMultiplier(10)
+      ->Range(1000, 1000000)
+      ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x4, xnn_f32_sigmoid_ukernel__sse41_p5_div_x4, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x8, xnn_f32_sigmoid_ukernel__sse41_p5_div_x8)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x8, xnn_f32_sigmoid_ukernel__sse41_p5_div_x8, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x12, xnn_f32_sigmoid_ukernel__sse41_p5_div_x12)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x12, xnn_f32_sigmoid_ukernel__sse41_p5_div_x12, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x16, xnn_f32_sigmoid_ukernel__sse41_p5_div_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x16, xnn_f32_sigmoid_ukernel__sse41_p5_div_x16, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x20, xnn_f32_sigmoid_ukernel__sse41_p5_div_x20)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x20, xnn_f32_sigmoid_ukernel__sse41_p5_div_x20, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x24, xnn_f32_sigmoid_ukernel__sse41_p5_div_x24)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_p5_div_x24, xnn_f32_sigmoid_ukernel__sse41_p5_div_x24, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
 
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x4, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x4)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x4, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x4, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x8, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x8)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x8, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x8, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x12, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x12)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x12, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x12, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x16, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x16)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x16, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x16, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x20, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x20)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x20, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x20, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
-  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x24, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x24)
+  BENCHMARK_CAPTURE(f32_sigmoid, sse41_lut64_p2_div_x24, xnn_f32_sigmoid_ukernel__sse41_lut64_p2_div_x24, benchmark::utils::CheckSSE41)
     ->RangeMultiplier(10)
     ->Range(1000, 1000000)
     ->UseRealTime();
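
All variants registered above compute the same logistic function; the suffixes appear to encode the algorithm choice (rr* range reduction, p*/lut* polynomial degree or table size, div/nr*fma/nr*recps reciprocal strategy) and the per-iteration tile width (x4, x8, ...). For orientation only, a scalar reference of the computation, not taken from the library:

#include <cmath>
#include <cstddef>

// sigmoid(x) = 1 / (1 + exp(-x)), evaluated via exp(-|x|) for stability.
static void sigmoid_reference(size_t n, const float* x, float* y) {
  for (size_t i = 0; i < n; i++) {
    const double e = std::exp(-std::fabs((double) x[i]));
    const double s = e / (1.0 + e);               // sigmoid(-|x|)
    y[i] = (float) (x[i] < 0.0f ? s : 1.0 - s);   // reflect for positive inputs
  }
}
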
diff --git a/bench/f32-softmax.cc b/bench/f32-softmax.cc
index 6f6ed94..ecc4d10 100644
--- a/bench/f32-softmax.cc
+++ b/bench/f32-softmax.cc
@@ -27,18 +27,18 @@
 static void DNNLSoftArgMax(
   benchmark::State& state)
 {
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_elements = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-1000.0f, 1000.0f), std::ref(rng));
 
   const size_t num_buffers = 1 +
-    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float> x(n);
-  std::vector<float> y(packed_n * num_buffers);
+    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_elements * sizeof(float));
+  std::vector<float> x(elements);
+  std::vector<float> y(packed_elements * num_buffers);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
@@ -48,7 +48,7 @@
     return;
   }
 
-  dnnl_dim_t input_output_shape[1] = { static_cast<int>(n) };
+  dnnl_dim_t input_output_shape[1] = { static_cast<int>(elements) };
 
   dnnl_memory_desc_t memory_descriptor = { 0 };
   if (dnnl_memory_desc_init_by_tag(
@@ -162,11 +162,18 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * 2 * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 #endif  // BENCHMARK_INTEL_DNNL
 
@@ -181,18 +188,18 @@
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_elements = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-1000.0f, 1000.0f), std::ref(rng));
 
   const size_t num_buffers = 1 +
-    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float> x(n);
-  std::vector<float> y(packed_n * num_buffers);
+    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_elements * sizeof(float));
+  std::vector<float> x(elements);
+  std::vector<float> y(packed_elements * num_buffers);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
@@ -207,10 +214,10 @@
 
     const auto start = std::chrono::high_resolution_clock::now();
     float x_max = nanf("");
-    rmax(n * sizeof(float), x.data(), &x_max);
+    rmax(elements * sizeof(float), x.data(), &x_max);
     float y_sum = nanf("");
-    raddexpminusmax(n * sizeof(float), x.data(), &y_sum, x_max);
-    vscaleexpminusmax(n * sizeof(float), x.data(), y.data() + packed_n * buffer_index, x_max, 1.0f / y_sum);
+    raddexpminusmax(elements * sizeof(float), x.data(), &y_sum, x_max);
+    vscaleexpminusmax(elements * sizeof(float), x.data(), y.data() + packed_elements * buffer_index, x_max, 1.0f / y_sum);
     const auto end = std::chrono::high_resolution_clock::now();
 
     const auto elapsed_seconds =
@@ -218,11 +225,18 @@
     state.SetIterationTime(elapsed_seconds.count());
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * 2 * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 static void ThreePassSoftMaxWithReloading(
@@ -236,18 +250,18 @@
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_elements = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-1000.0f, 1000.0f), std::ref(rng));
 
   const size_t num_buffers = 1 +
-    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float> x(n);
-  std::vector<float> y(packed_n * num_buffers);
+    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_elements * sizeof(float));
+  std::vector<float> x(elements);
+  std::vector<float> y(packed_elements * num_buffers);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
@@ -262,10 +276,10 @@
 
     const auto start = std::chrono::high_resolution_clock::now();
     float x_max = nanf("");
-    rmax(n * sizeof(float), x.data(), &x_max);
+    rmax(elements * sizeof(float), x.data(), &x_max);
     float y_sum = nanf("");
-    raddstoreexpminusmax(n * sizeof(float), x.data(), y.data() + packed_n * buffer_index, &y_sum, x_max);
-    vscale(n * sizeof(float), y.data() + packed_n * buffer_index, y.data() + packed_n * buffer_index, 1.0f / y_sum);
+    raddstoreexpminusmax(elements * sizeof(float), x.data(), y.data() + packed_elements * buffer_index, &y_sum, x_max);
+    vscale(elements * sizeof(float), y.data() + packed_elements * buffer_index, y.data() + packed_elements * buffer_index, 1.0f / y_sum);
     const auto end = std::chrono::high_resolution_clock::now();
 
     const auto elapsed_seconds =
@@ -273,11 +287,18 @@
     state.SetIterationTime(elapsed_seconds.count());
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * 2 * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 static void TwoPassSoftMax(
@@ -290,18 +311,18 @@
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_elements = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-1000.0f, 1000.0f), std::ref(rng));
 
   const size_t num_buffers = 1 +
-    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float> x(n);
-  std::vector<float> y(packed_n * num_buffers);
+    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_elements * sizeof(float));
+  std::vector<float> x(elements);
+  std::vector<float> y(packed_elements * num_buffers);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
@@ -316,8 +337,8 @@
 
     const auto start = std::chrono::high_resolution_clock::now();
     float scale[2];
-    raddextexp(n * sizeof(float), x.data(), scale);
-    vscaleextexp(n * sizeof(float), x.data(), y.data() + packed_n * buffer_index, 1.0f / scale[0], -scale[1]);
+    raddextexp(elements * sizeof(float), x.data(), scale);
+    vscaleextexp(elements * sizeof(float), x.data(), y.data() + packed_elements * buffer_index, 1.0f / scale[0], -scale[1]);
     const auto end = std::chrono::high_resolution_clock::now();
 
     const auto elapsed_seconds =
@@ -325,11 +346,18 @@
     state.SetIterationTime(elapsed_seconds.count());
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * 2 * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
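
The ThreePassSoftMaxWithReloading loop above times the rmax → raddstoreexpminusmax → vscale pipeline. A scalar sketch of what that pipeline computes, for orientation only; the vector kernels tile and fuse these loops:

#include <algorithm>
#include <cmath>
#include <cstddef>

static void softmax_reference(size_t n, const float* x, float* y) {
  // Pass 1 (rmax): running maximum, for numerical stability.
  float x_max = x[0];
  for (size_t i = 1; i < n; i++) {
    x_max = std::max(x_max, x[i]);
  }
  // Pass 2 (raddstoreexpminusmax): store exp(x - max) and accumulate its sum.
  float y_sum = 0.0f;
  for (size_t i = 0; i < n; i++) {
    y[i] = std::exp(x[i] - x_max);
    y_sum += y[i];
  }
  // Pass 3 (vscale): scale by the reciprocal of the sum.
  const float inv_sum = 1.0f / y_sum;
  for (size_t i = 0; i < n; i++) {
    y[i] *= inv_sum;
  }
}
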
 
 static void CharacteristicArguments(benchmark::internal::Benchmark* b) {
diff --git a/bench/f32-spmm.cc b/bench/f32-spmm.cc
index c720072..5b6679d 100644
--- a/bench/f32-spmm.cc
+++ b/bench/f32-spmm.cc
@@ -10,10 +10,8 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
-#include "bench/gemm.h"
+#include "bench/spmm.h"
 #include "bench/utils.h"
 #include <xnnpack/AlignedAllocator.h>
 #include <xnnpack/common.h>
@@ -23,10 +21,10 @@
 
 
 static void SpMMBenchmark(benchmark::State& state,
-  xnn_f32_spmm_minmax_ukernel_function spmm, uint32_t mr, uint32_t nr, float sparsity)
+  xnn_f32_spmm_minmax_ukernel_function spmm, uint32_t mr, uint32_t nr, float sparsity,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
+  if (isa_check && !isa_check(state)) {
     return;
   }
 
@@ -150,7 +148,11 @@
       &params);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["FLOPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * num_nonzeroes, benchmark::Counter::kIsRate);
 
@@ -159,6 +161,74 @@
 }
 
 
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  static void spmm80_4x1__neon(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_4x1__neon, 4, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_8x1__neon(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_8x1__neon, 8, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_12x1__neon(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_12x1__neon, 12, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_16x1__neon(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_16x1__neon, 16, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_32x1__neon(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_32x1__neon, 32, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_4x1__neon_x2(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_4x1__neon_x2, 4, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_8x1__neon_x2(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_8x1__neon_x2, 8, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_16x1__neon_x2(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_16x1__neon_x2, 16, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_32x1__neon_x2(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_32x1__neon_x2, 32, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_4x1__neon_pipelined(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined, 4, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_8x1__neon_pipelined(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined, 8, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_16x1__neon_pipelined(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined, 16, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  static void spmm80_32x1__neon_pipelined(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined, 32, 1, 0.8f, benchmark::utils::CheckNEON);
+  }
+
+  BENCHMARK_SPMM(spmm80_4x1__neon)
+  BENCHMARK_SPMM(spmm80_4x1__neon_pipelined)
+  BENCHMARK_SPMM(spmm80_4x1__neon_x2)
+  BENCHMARK_SPMM(spmm80_8x1__neon)
+  BENCHMARK_SPMM(spmm80_8x1__neon_pipelined)
+  BENCHMARK_SPMM(spmm80_8x1__neon_x2)
+  BENCHMARK_SPMM(spmm80_12x1__neon)
+  BENCHMARK_SPMM(spmm80_16x1__neon)
+  BENCHMARK_SPMM(spmm80_16x1__neon_pipelined)
+  BENCHMARK_SPMM(spmm80_16x1__neon_x2)
+  BENCHMARK_SPMM(spmm80_32x1__neon)
+  BENCHMARK_SPMM(spmm80_32x1__neon_pipelined)
+  BENCHMARK_SPMM(spmm80_32x1__neon_x2)
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
 #if XNN_ARCH_ARM64
   static void spmm80_4x1__neonfma(benchmark::State& state, const char* net) {
     SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_4x1__neonfma, 4, 1, 0.8f);
@@ -252,29 +322,29 @@
     SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_32x1__neonfma_pipelined, 32, 1, 0.8f);
   }
 
-  BENCHMARK_GEMM(spmm80_4x1__neonfma)
-  BENCHMARK_GEMM(spmm80_4x1__neonfma_pipelined)
-  BENCHMARK_GEMM(spmm80_4x1__neonfma_x2)
-  BENCHMARK_GEMM(spmm80_4x2__neonfma)
-  BENCHMARK_GEMM(spmm80_4x4__neonfma)
-  BENCHMARK_GEMM(spmm80_8x1__neonfma)
-  BENCHMARK_GEMM(spmm80_8x1__neonfma_pipelined)
-  BENCHMARK_GEMM(spmm80_8x1__neonfma_x2)
-  BENCHMARK_GEMM(spmm80_8x2__neonfma)
-  BENCHMARK_GEMM(spmm80_8x4__neonfma)
-  BENCHMARK_GEMM(spmm80_12x1__neonfma)
-  BENCHMARK_GEMM(spmm80_12x2__neonfma)
-  BENCHMARK_GEMM(spmm80_12x4__neonfma)
-  BENCHMARK_GEMM(spmm80_16x1__neonfma)
-  BENCHMARK_GEMM(spmm80_16x1__neonfma_pipelined)
-  BENCHMARK_GEMM(spmm80_16x1__neonfma_x2)
-  BENCHMARK_GEMM(spmm80_16x2__neonfma)
-  BENCHMARK_GEMM(spmm80_16x4__neonfma)
-  BENCHMARK_GEMM(spmm80_32x1__neonfma)
-  BENCHMARK_GEMM(spmm80_32x1__neonfma_pipelined)
-  BENCHMARK_GEMM(spmm80_32x1__neonfma_x2)
-  BENCHMARK_GEMM(spmm80_32x2__neonfma)
-  BENCHMARK_GEMM(spmm80_32x4__neonfma)
+  BENCHMARK_SPMM(spmm80_4x1__neonfma)
+  BENCHMARK_SPMM(spmm80_4x1__neonfma_pipelined)
+  BENCHMARK_SPMM(spmm80_4x1__neonfma_x2)
+  BENCHMARK_SPMM(spmm80_4x2__neonfma)
+  BENCHMARK_SPMM(spmm80_4x4__neonfma)
+  BENCHMARK_SPMM(spmm80_8x1__neonfma)
+  BENCHMARK_SPMM(spmm80_8x1__neonfma_pipelined)
+  BENCHMARK_SPMM(spmm80_8x1__neonfma_x2)
+  BENCHMARK_SPMM(spmm80_8x2__neonfma)
+  BENCHMARK_SPMM(spmm80_8x4__neonfma)
+  BENCHMARK_SPMM(spmm80_12x1__neonfma)
+  BENCHMARK_SPMM(spmm80_12x2__neonfma)
+  BENCHMARK_SPMM(spmm80_12x4__neonfma)
+  BENCHMARK_SPMM(spmm80_16x1__neonfma)
+  BENCHMARK_SPMM(spmm80_16x1__neonfma_pipelined)
+  BENCHMARK_SPMM(spmm80_16x1__neonfma_x2)
+  BENCHMARK_SPMM(spmm80_16x2__neonfma)
+  BENCHMARK_SPMM(spmm80_16x4__neonfma)
+  BENCHMARK_SPMM(spmm80_32x1__neonfma)
+  BENCHMARK_SPMM(spmm80_32x1__neonfma_pipelined)
+  BENCHMARK_SPMM(spmm80_32x1__neonfma_x2)
+  BENCHMARK_SPMM(spmm80_32x2__neonfma)
+  BENCHMARK_SPMM(spmm80_32x4__neonfma)
 #endif  // XNN_ARCH_ARM64
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
@@ -294,10 +364,10 @@
     SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_32x1__sse, 32, 1, 0.8f);
   }
 
-  BENCHMARK_GEMM(spmm80_4x1__sse)
-  BENCHMARK_GEMM(spmm80_8x1__sse)
-  BENCHMARK_GEMM(spmm80_16x1__sse)
-  BENCHMARK_GEMM(spmm80_32x1__sse)
+  BENCHMARK_SPMM(spmm80_4x1__sse)
+  BENCHMARK_SPMM(spmm80_8x1__sse)
+  BENCHMARK_SPMM(spmm80_16x1__sse)
+  BENCHMARK_SPMM(spmm80_32x1__sse)
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 static void spmm80_1x1__scalar(benchmark::State& state, const char* net) {
@@ -340,16 +410,16 @@
   SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined, 8, 1, 0.8f);
 }
 
-BENCHMARK_GEMM(spmm80_1x1__scalar)
-BENCHMARK_GEMM(spmm80_2x1__scalar)
-BENCHMARK_GEMM(spmm80_4x1__scalar)
-BENCHMARK_GEMM(spmm80_8x1__scalar)
-BENCHMARK_GEMM(spmm80_8x2__scalar)
-BENCHMARK_GEMM(spmm80_8x4__scalar)
-BENCHMARK_GEMM(spmm80_1x1__scalar_pipelined)
-BENCHMARK_GEMM(spmm80_2x1__scalar_pipelined)
-BENCHMARK_GEMM(spmm80_4x1__scalar_pipelined)
-BENCHMARK_GEMM(spmm80_8x1__scalar_pipelined)
+BENCHMARK_SPMM(spmm80_1x1__scalar)
+BENCHMARK_SPMM(spmm80_2x1__scalar)
+BENCHMARK_SPMM(spmm80_4x1__scalar)
+BENCHMARK_SPMM(spmm80_8x1__scalar)
+BENCHMARK_SPMM(spmm80_8x2__scalar)
+BENCHMARK_SPMM(spmm80_8x4__scalar)
+BENCHMARK_SPMM(spmm80_1x1__scalar_pipelined)
+BENCHMARK_SPMM(spmm80_2x1__scalar_pipelined)
+BENCHMARK_SPMM(spmm80_4x1__scalar_pipelined)
+BENCHMARK_SPMM(spmm80_8x1__scalar_pipelined)
 
 #if XNN_ARCH_WASMSIMD
   static void spmm80_4x1__wasmsimd_arm(benchmark::State& state, const char* net) {
@@ -511,46 +581,46 @@
     SpMMBenchmark(state, xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_pipelined_x2, 16, 1, 0.8f);
   }
 
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_arm)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_arm_x2)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_arm_x4)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_x86)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_x86_x2)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_x86_x4)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_arm)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_arm_x2)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_arm_x4)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_x86)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_x86_x2)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_x86_x4)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_arm)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_arm_x2)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_arm_x4)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_x86)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_x86_x2)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_x86_x4)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_arm)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_arm_x2)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_arm_x4)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_x86)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_x86_x2)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_x86_x4)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_arm_pipelined)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_arm_pipelined_x2)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_x86_pipelined)
-  BENCHMARK_GEMM(spmm80_4x1__wasmsimd_x86_pipelined_x2)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_arm_pipelined)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_arm_pipelined_x2)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_x86_pipelined)
-  BENCHMARK_GEMM(spmm80_8x1__wasmsimd_x86_pipelined_x2)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_arm_pipelined)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_arm_pipelined_x2)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_x86_pipelined)
-  BENCHMARK_GEMM(spmm80_16x1__wasmsimd_x86_pipelined_x2)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_arm_pipelined)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_arm_pipelined_x2)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_x86_pipelined)
-  BENCHMARK_GEMM(spmm80_32x1__wasmsimd_x86_pipelined_x2)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_arm)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_arm_x2)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_arm_x4)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_x86)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_x86_x2)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_x86_x4)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_arm)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_arm_x2)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_arm_x4)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_x86)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_x86_x2)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_x86_x4)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_arm)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_arm_x2)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_arm_x4)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_x86)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_x86_x2)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_x86_x4)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_arm)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_arm_x2)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_arm_x4)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_x86)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_x86_x2)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_x86_x4)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_arm_pipelined)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_arm_pipelined_x2)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_x86_pipelined)
+  BENCHMARK_SPMM(spmm80_4x1__wasmsimd_x86_pipelined_x2)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_arm_pipelined)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_arm_pipelined_x2)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_x86_pipelined)
+  BENCHMARK_SPMM(spmm80_8x1__wasmsimd_x86_pipelined_x2)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_arm_pipelined)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_arm_pipelined_x2)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_x86_pipelined)
+  BENCHMARK_SPMM(spmm80_16x1__wasmsimd_x86_pipelined_x2)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_arm_pipelined)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_arm_pipelined_x2)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_x86_pipelined)
+  BENCHMARK_SPMM(spmm80_32x1__wasmsimd_x86_pipelined_x2)
 
 #endif  // XNN_ARCH_WASMSIMD
 
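[Editor's sketch, not part of the patch] SpMMBenchmark above drops the cpuinfo_initialize() call in favor of an optional benchmark::utils::IsaCheckFunction, and registration moves from BENCHMARK_GEMM to BENCHMARK_SPMM out of the new bench/spmm.h header (not shown here). A minimal sketch of the check hook's shape; the typedef and the stand-in check below are assumptions, only the caller-side pattern is taken from the hunk:

// Assumed shape of the ISA-check hook; the real typedef lives in bench/utils.h.
typedef bool (*IsaCheckFunctionSketch)(benchmark::State& state);

static bool CheckAlwaysSupported(benchmark::State& state) {
  // A real check (e.g. benchmark::utils::CheckNEON) queries CPU features and
  // calls state.SkipWithError(...) before returning false on unsupported CPUs.
  (void) state;
  return true;
}

// Caller side, mirroring the pattern in SpMMBenchmark:
//   if (isa_check && !isa_check(state)) {
//     return;  // the check has already marked the benchmark as skipped
//   }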
diff --git a/bench/f32-velu.cc b/bench/f32-velu.cc
new file mode 100644
index 0000000..f3393b4
--- /dev/null
+++ b/bench/f32-velu.cc
@@ -0,0 +1,777 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <cmath>
+#include <functional>
+#include <random>
+#include <vector>
+
+#include <benchmark/benchmark.h>
+#include "bench/utils.h"
+
+#include <xnnpack/AlignedAllocator.h>
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+#include <xnnpack/params.h>
+#include <xnnpack/params-init.h>
+
+
+static void f32_elu(
+  benchmark::State& state,
+  xnn_f32_velu_ukernel_function elu,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
+{
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  const size_t elements = state.range(0);
+
+  std::random_device random_device;
+  auto rng = std::mt19937(random_device());
+  auto f32rng = std::bind(std::uniform_real_distribution<float>(-20.0f, 10.0f), std::ref(rng));
+
+  std::vector<float, AlignedAllocator<float, 64>> x(elements);
+  std::vector<float, AlignedAllocator<float, 64>> y(elements);
+  std::generate(x.begin(), x.end(), std::ref(f32rng));
+  std::fill(y.begin(), y.end(), std::nanf(""));
+
+  const union xnn_f32_elu_params params =
+    xnn_init_f32_elu_params(1.0f /* prescale */, 1.0f /* alpha */, 1.0f /* beta */);
+  for (auto _ : state) {
+    elu(elements * sizeof(float), x.data(), y.data(), &params);
+  }
+
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
+  state.counters["elements"] =
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
+  state.counters["bytes"] =
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
+}
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  BENCHMARK_CAPTURE(f32_elu, neonfma_lut16_p3_x4, xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_lut16_p3_x8, xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_lut16_p3_x12, xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_lut16_p3_x16, xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_lut16_p3_x20, xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_lut16_p3_x24, xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, neonfma_p6_x4, xnn_f32_velu_ukernel__neonfma_rr1_p6_x4, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_p6_x8, xnn_f32_velu_ukernel__neonfma_rr1_p6_x8, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_p6_x12, xnn_f32_velu_ukernel__neonfma_rr1_p6_x12, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_p6_x16, xnn_f32_velu_ukernel__neonfma_rr1_p6_x16, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_p6_x20, xnn_f32_velu_ukernel__neonfma_rr1_p6_x20, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neonfma_p6_x24, xnn_f32_velu_ukernel__neonfma_rr1_p6_x24, benchmark::utils::CheckNEONFMA)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, neon_lut16_p3_x4, xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_lut16_p3_x8, xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_lut16_p3_x12, xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_lut16_p3_x16, xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_lut16_p3_x20, xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_lut16_p3_x24, xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, neon_p6_x4, xnn_f32_velu_ukernel__neon_rr2_p6_x4, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_p6_x8, xnn_f32_velu_ukernel__neon_rr2_p6_x8, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_p6_x12, xnn_f32_velu_ukernel__neon_rr2_p6_x12, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_p6_x16, xnn_f32_velu_ukernel__neon_rr2_p6_x16, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_p6_x20, xnn_f32_velu_ukernel__neon_rr2_p6_x20, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, neon_p6_x24, xnn_f32_velu_ukernel__neon_rr2_p6_x24, benchmark::utils::CheckNEON)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  BENCHMARK_CAPTURE(f32_elu, avx512f_lut16_p3_x16, xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_lut16_p3_x32, xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_lut16_p3_x48, xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_lut16_p3_x64, xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_lut16_p3_x80, xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_lut16_p3_x96, xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_lut16_p3_x112, xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_lut16_p3_x128, xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, avx512f_p6_x16, xnn_f32_velu_ukernel__avx512f_rr1_p6_x16, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_p6_x32, xnn_f32_velu_ukernel__avx512f_rr1_p6_x32, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_p6_x48, xnn_f32_velu_ukernel__avx512f_rr1_p6_x48, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_p6_x64, xnn_f32_velu_ukernel__avx512f_rr1_p6_x64, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_p6_x80, xnn_f32_velu_ukernel__avx512f_rr1_p6_x80, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_p6_x96, xnn_f32_velu_ukernel__avx512f_rr1_p6_x96, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_p6_x112, xnn_f32_velu_ukernel__avx512f_rr1_p6_x112, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx512f_p6_x128, xnn_f32_velu_ukernel__avx512f_rr1_p6_x128, benchmark::utils::CheckAVX512F)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x8, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x16, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x24, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x32, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x40, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x48, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x56, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x64, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x72, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut4_p4_x80, xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x8, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x16, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x24, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x32, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x40, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x48, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x56, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x64, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x72, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut8_p4_x80, xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x8, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x16, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x24, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x32, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x40, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x48, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x56, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x64, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x72, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_lut16_p3_x80, xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x8, xnn_f32_velu_ukernel__avx2_rr1_p6_x8, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x16, xnn_f32_velu_ukernel__avx2_rr1_p6_x16, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x24, xnn_f32_velu_ukernel__avx2_rr1_p6_x24, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x32, xnn_f32_velu_ukernel__avx2_rr1_p6_x32, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x40, xnn_f32_velu_ukernel__avx2_rr1_p6_x40, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x48, xnn_f32_velu_ukernel__avx2_rr1_p6_x48, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x56, xnn_f32_velu_ukernel__avx2_rr1_p6_x56, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x64, xnn_f32_velu_ukernel__avx2_rr1_p6_x64, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x72, xnn_f32_velu_ukernel__avx2_rr1_p6_x72, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx2_p6_x80, xnn_f32_velu_ukernel__avx2_rr1_p6_x80, benchmark::utils::CheckAVX2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, avx_lut4_p4_x8, xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut4_p4_x16, xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut4_p4_x24, xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut4_p4_x32, xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut4_p4_x40, xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut4_p4_x48, xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, avx_lut16_p3_x8, xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut16_p3_x16, xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut16_p3_x24, xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut16_p3_x32, xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut16_p3_x40, xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_lut16_p3_x48, xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, avx_p6_x8, xnn_f32_velu_ukernel__avx_rr2_p6_x8, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_p6_x16, xnn_f32_velu_ukernel__avx_rr2_p6_x16, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_p6_x24, xnn_f32_velu_ukernel__avx_rr2_p6_x24, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_p6_x32, xnn_f32_velu_ukernel__avx_rr2_p6_x32, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_p6_x40, xnn_f32_velu_ukernel__avx_rr2_p6_x40, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, avx_p6_x48, xnn_f32_velu_ukernel__avx_rr2_p6_x48, benchmark::utils::CheckAVX)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, sse41_lut16_p3_x4, xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_lut16_p3_x8, xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_lut16_p3_x12, xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_lut16_p3_x16, xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_lut16_p3_x20, xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_lut16_p3_x24, xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, sse41_p6_x4, xnn_f32_velu_ukernel__sse41_rr2_p6_x4, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_p6_x8, xnn_f32_velu_ukernel__sse41_rr2_p6_x8, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_p6_x12, xnn_f32_velu_ukernel__sse41_rr2_p6_x12, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_p6_x16, xnn_f32_velu_ukernel__sse41_rr2_p6_x16, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_p6_x20, xnn_f32_velu_ukernel__sse41_rr2_p6_x20, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse41_p6_x24, xnn_f32_velu_ukernel__sse41_rr2_p6_x24, benchmark::utils::CheckSSE41)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, sse2_lut16_p3_x4, xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_lut16_p3_x8, xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_lut16_p3_x12, xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_lut16_p3_x16, xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_lut16_p3_x20, xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_lut16_p3_x24, xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, sse2_p6_x4, xnn_f32_velu_ukernel__sse2_rr2_p6_x4)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_p6_x8, xnn_f32_velu_ukernel__sse2_rr2_p6_x8)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_p6_x12, xnn_f32_velu_ukernel__sse2_rr2_p6_x12)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_p6_x16, xnn_f32_velu_ukernel__sse2_rr2_p6_x16)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_p6_x20, xnn_f32_velu_ukernel__sse2_rr2_p6_x20)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, sse2_p6_x24, xnn_f32_velu_ukernel__sse2_rr2_p6_x24)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#if XNN_ARCH_WASMSIMD
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_lut16_p3_x4, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_lut16_p3_x8, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_lut16_p3_x12, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_lut16_p3_x16, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_lut16_p3_x20, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_lut16_p3_x24, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_lut16_p3_x4, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_lut16_p3_x8, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_lut16_p3_x12, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_lut16_p3_x16, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_lut16_p3_x20, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_lut16_p3_x24, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_p6_x4, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_p6_x8, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_p6_x12, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_p6_x16, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_p6_x20, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_arm_p6_x24, xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_p6_x4, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_p6_x8, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_p6_x12, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_p6_x16, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_p6_x20, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasmsimd_x86_p6_x24, xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+#endif  // XNN_ARCH_WASMSIMD
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  BENCHMARK_CAPTURE(f32_elu, wasm_lut16_p3_x1, xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_lut16_p3_x2, xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_lut16_p3_x3, xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_lut16_p3_x4, xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_lut16_p3_x5, xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_lut16_p3_x6, xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+
+  BENCHMARK_CAPTURE(f32_elu, wasm_p6_x1, xnn_f32_velu_ukernel__wasm_rr2_p6_x1)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_p6_x2, xnn_f32_velu_ukernel__wasm_rr2_p6_x2)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_p6_x3, xnn_f32_velu_ukernel__wasm_rr2_p6_x3)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_p6_x4, xnn_f32_velu_ukernel__wasm_rr2_p6_x4)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_p6_x5, xnn_f32_velu_ukernel__wasm_rr2_p6_x5)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+  BENCHMARK_CAPTURE(f32_elu, wasm_p6_x6, xnn_f32_velu_ukernel__wasm_rr2_p6_x6)
+    ->RangeMultiplier(10)
+    ->Range(1000, 1000000)
+    ->UseRealTime();
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+BENCHMARK_CAPTURE(f32_elu, scalar_lut16_p3_x1, xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_lut16_p3_x2, xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_lut16_p3_x3, xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_lut16_p3_x4, xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_lut16_p3_x5, xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_lut16_p3_x6, xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+
+BENCHMARK_CAPTURE(f32_elu, scalar_p6_x1, xnn_f32_velu_ukernel__scalar_rr2_p6_x1)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_p6_x2, xnn_f32_velu_ukernel__scalar_rr2_p6_x2)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_p6_x3, xnn_f32_velu_ukernel__scalar_rr2_p6_x3)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_p6_x4, xnn_f32_velu_ukernel__scalar_rr2_p6_x4)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_p6_x5, xnn_f32_velu_ukernel__scalar_rr2_p6_x5)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+BENCHMARK_CAPTURE(f32_elu, scalar_p6_x6, xnn_f32_velu_ukernel__scalar_rr2_p6_x6)
+  ->RangeMultiplier(10)
+  ->Range(1000, 1000000)
+  ->UseRealTime();
+
+#ifndef XNNPACK_BENCHMARK_NO_MAIN
+BENCHMARK_MAIN();
+#endif
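[Editor's sketch, not part of the patch] For orientation, a scalar reference of the generalized ELU the xnn_f32_velu ukernels benchmarked above compute. The prescale/alpha/beta semantics are an assumption inferred from the xnn_init_f32_elu_params(prescale, alpha, beta) call in f32_elu, not something this patch states:

// Assumed semantics (reference sketch only):
//   x >= 0: y = beta * x
//   x <  0: y = alpha * (exp(prescale * x) - 1)
#include <cmath>
#include <cstddef>

static void elu_reference(size_t n, const float* x, float* y,
                          float prescale, float alpha, float beta) {
  for (size_t i = 0; i < n; i++) {
    const float z = x[i] * prescale;  // prescale only matters on the exp branch
    y[i] = (x[i] >= 0.0f) ? beta * x[i] : alpha * (std::exp(z) - 1.0f);
  }
}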
diff --git a/bench/f32-vscaleexpminusmax.cc b/bench/f32-vscaleexpminusmax.cc
index 8f77d60..568a982 100644
--- a/bench/f32-vscaleexpminusmax.cc
+++ b/bench/f32-vscaleexpminusmax.cc
@@ -28,18 +28,18 @@
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_elements = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-1000.0f, 1000.0f), std::ref(rng));
 
   const size_t num_buffers = 1 +
-    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float, AlignedAllocator<float, 64>> x(n);
-  std::vector<float, AlignedAllocator<float, 64>> y(packed_n * num_buffers);
+    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_elements * sizeof(float));
+  std::vector<float, AlignedAllocator<float, 64>> x(elements);
+  std::vector<float, AlignedAllocator<float, 64>> y(packed_elements * num_buffers);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
 
@@ -49,22 +49,29 @@
   for (auto _ : state) {
     state.PauseTiming();
     float x_max = nanf("");
-    rmax(n * sizeof(float), x.data(), &x_max);
+    rmax(elements * sizeof(float), x.data(), &x_max);
     float y_sum = nanf("");
-    raddexpminusmax(n * sizeof(float), x.data(), &y_sum, x_max);
+    raddexpminusmax(elements * sizeof(float), x.data(), &y_sum, x_max);
     if (++buffer_index == num_buffers) {
       buffer_index = 0;
     }
     state.ResumeTiming();
 
-    vscaleexpminusmax(n * sizeof(float), x.data(), y.data() + packed_n * buffer_index, x_max, 1.0f / y_sum);
+    vscaleexpminusmax(elements * sizeof(float), x.data(), y.data() + packed_elements * buffer_index, x_max, 1.0f / y_sum);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * 2 * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 static void CharacteristicArguments(benchmark::internal::Benchmark* b) {
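[Editor's sketch, not part of the patch] The timed region above chains rmax, raddexpminusmax, and vscaleexpminusmax, which together form a numerically stable softmax: subtract the running maximum, exponentiate, and normalize by the sum. A scalar restatement of that pipeline for reference (the helper name is hypothetical; assumes n > 0):

#include <algorithm>
#include <cmath>
#include <cstddef>

static void softmax_reference(size_t n, const float* x, float* y) {
  float x_max = x[0];                                              // rmax
  for (size_t i = 1; i < n; i++) x_max = std::max(x_max, x[i]);
  float sum = 0.0f;                                                // raddexpminusmax
  for (size_t i = 0; i < n; i++) sum += std::exp(x[i] - x_max);
  const float inv_sum = 1.0f / sum;                                // vscaleexpminusmax
  for (size_t i = 0; i < n; i++) y[i] = std::exp(x[i] - x_max) * inv_sum;
}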
diff --git a/bench/f32-vscaleextexp.cc b/bench/f32-vscaleextexp.cc
index e955e3a..23aa20f 100644
--- a/bench/f32-vscaleextexp.cc
+++ b/bench/f32-vscaleextexp.cc
@@ -26,9 +26,9 @@
     return;
   }
 
-  const size_t n = state.range(0);
+  const size_t elements = state.range(0);
   const size_t cache_line_size_max = 128;
-  const size_t packed_n = benchmark::utils::RoundUp(n, cache_line_size_max / sizeof(float));
+  const size_t packed_n = benchmark::utils::RoundUp(elements, cache_line_size_max / sizeof(float));
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
@@ -36,7 +36,7 @@
 
   const size_t num_buffers = 1 +
     benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(), packed_n * sizeof(float));
-  std::vector<float, AlignedAllocator<float, 64>> x(n);
+  std::vector<float, AlignedAllocator<float, 64>> x(elements);
   std::vector<float, AlignedAllocator<float, 64>> y(packed_n * num_buffers);
 
   std::generate(x.begin(), x.end(), std::ref(f32rng));
@@ -47,7 +47,7 @@
   for (auto _ : state) {
     state.PauseTiming();
     float scale[2];
-    raddextexp(n * sizeof(float), x.data(), scale);
+    raddextexp(elements * sizeof(float), x.data(), scale);
     const float ext_mantissa = 1.0f / scale[0];
     const float ext_exponent = -scale[1];
     if (++buffer_index == num_buffers) {
@@ -55,14 +55,21 @@
     }
     state.ResumeTiming();
 
-    vscaleextexp(n * sizeof(float), x.data(), y.data() + packed_n * buffer_index, ext_mantissa, ext_exponent);
+    vscaleextexp(elements * sizeof(float), x.data(), y.data() + packed_n * buffer_index, ext_mantissa, ext_exponent);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
+  const size_t elements_per_iteration = elements;
   state.counters["elements"] =
-    benchmark::Counter(uint64_t(state.iterations()) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * elements_per_iteration, benchmark::Counter::kIsRate);
+
+  const size_t bytes_per_iteration = 2 * elements * sizeof(float);
   state.counters["bytes"] =
-    benchmark::Counter(uint64_t(state.iterations()) * 2 * sizeof(float) * n, benchmark::Counter::kIsRate);
+    benchmark::Counter(uint64_t(state.iterations()) * bytes_per_iteration, benchmark::Counter::kIsRate);
 }
 
 static void CharacteristicArguments(benchmark::internal::Benchmark* b) {
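[Editor's sketch, not part of the patch] The ext_mantissa/ext_exponent pair above suggests raddextexp reports the accumulated sum in mantissa-times-power-of-two form, and vscaleextexp folds that scale back into each output. A rough scalar sketch under that assumed contract; it is idealized and ignores the extended-range arithmetic the real kernels use to avoid overflow:

// Assumed contract: sum == scale[0] * 2^scale[1], so each output is
// exp(x[i]) / sum == exp(x[i]) * ext_mantissa * 2^ext_exponent.
#include <cmath>
#include <cstddef>

static void vscaleextexp_reference(size_t n, const float* x, float* y,
                                   float ext_mantissa, float ext_exponent) {
  for (size_t i = 0; i < n; i++) {
    y[i] = std::exp(x[i]) * std::ldexp(ext_mantissa, (int) ext_exponent);
  }
}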
diff --git a/bench/floor.cc b/bench/floor.cc
index da46368..08fd7e9 100644
--- a/bench/floor.cc
+++ b/bench/floor.cc
@@ -77,7 +77,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -184,7 +187,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
diff --git a/bench/global-average-pooling.cc b/bench/global-average-pooling.cc
index 517c679..e6dbebd 100644
--- a/bench/global-average-pooling.cc
+++ b/bench/global-average-pooling.cc
@@ -70,7 +70,11 @@
   }
   global_pooling_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
       batch_size * (input_height * input_width + 1) * channels * sizeof(uint8_t),
@@ -129,7 +133,11 @@
   }
   global_pooling_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
       batch_size * (input_height * input_width + 1) * channels * sizeof(int8_t),
@@ -189,7 +197,11 @@
   }
   global_pooling_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
       batch_size * (input_height * input_width + 1) * channels * sizeof(uint16_t),
@@ -244,7 +256,11 @@
   }
   global_pooling_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
       batch_size * (input_height * input_width + 1) * channels * sizeof(float),
diff --git a/bench/hardswish.cc b/bench/hardswish.cc
index 7eb91c4..385976d 100644
--- a/bench/hardswish.cc
+++ b/bench/hardswish.cc
@@ -79,7 +79,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -186,7 +189,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -254,7 +260,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
diff --git a/bench/max-pooling.cc b/bench/max-pooling.cc
index ea52622..d69c1de 100644
--- a/bench/max-pooling.cc
+++ b/bench/max-pooling.cc
@@ -86,7 +86,11 @@
   }
   pooling_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
       batch_size * (input_height * input_width + output_height * output_width) * channels * sizeof(uint8_t),
@@ -159,7 +163,11 @@
   }
   pooling_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["bytes"] = benchmark::Counter(
     uint64_t(state.iterations()) *
       batch_size * (input_height * input_width + output_height * output_width) * channels * sizeof(float),
diff --git a/bench/prelu.cc b/bench/prelu.cc
index 7bed219..5c0700e 100644
--- a/bench/prelu.cc
+++ b/bench/prelu.cc
@@ -82,7 +82,10 @@
   }
   prelu_op = nullptr;
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * height * width * channels;
   state.counters["elements"] =
@@ -207,7 +210,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * height * width * channels;
   state.counters["elements"] =
diff --git a/bench/qs8-gemm.cc b/bench/qs8-gemm.cc
index 9b04607..f50452c 100644
--- a/bench/qs8-gemm.cc
+++ b/bench/qs8-gemm.cc
@@ -100,7 +100,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["OPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -177,7 +181,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["OPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -260,7 +268,11 @@
     ruy::Mul(ruy_a, ruy_b, mul_params, &context, &ruy_c);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["OPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
diff --git a/bench/qs8-requantization.cc b/bench/qs8-requantization.cc
index c20f2b4..7f02109 100644
--- a/bench/qs8-requantization.cc
+++ b/bench/qs8-requantization.cc
@@ -116,22 +116,6 @@
   }
 }
 
-#if !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
-  BENCHMARK_F(Requantization, precise__psimd)(benchmark::State& state) {
-    for (auto _ : state) {
-      xnn_qs8_requantize_precise__psimd(
-          n(), input(), 0x1.0p-12f /* scale */, -1 /* zero point */, -127 /* qmin */, 126 /* qmax */, output());
-    }
-  }
-
-  BENCHMARK_F(Requantization, fp32__psimd)(benchmark::State& state) {
-    for (auto _ : state) {
-      xnn_qs8_requantize_fp32__psimd(
-          n(), input(), 0x1.0p-12f /* scale */, -1 /* zero point */, -127 /* qmin */, 126 /* qmax */, output());
-    }
-  }
-#endif  // !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
-
 
 #if XNN_ARCH_WASMSIMD
   BENCHMARK_F(Requantization, fp32__wasmsimd)(benchmark::State& state) {
diff --git a/bench/qu8-gemm.cc b/bench/qu8-gemm.cc
index 8934f6d..7b95785 100644
--- a/bench/qu8-gemm.cc
+++ b/bench/qu8-gemm.cc
@@ -16,8 +16,6 @@
 #include <random>
 #include <vector>
 
-#include <cpuinfo.h>
-
 #include <benchmark/benchmark.h>
 #ifdef BENCHMARK_GEMMLOWP
 #include "gemmlowp/public/gemmlowp.h"
@@ -37,13 +35,9 @@
 
 static void GEMMBenchmark(benchmark::State& state,
   xnn_qu8_gemm_ukernel_function gemm,
-  size_t mr, size_t nr, size_t kr, size_t sr)
+  size_t mr, size_t nr, size_t kr, size_t sr,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-    return;
-  }
-
   const size_t mc = state.range(0);
   const size_t nc = state.range(1);
   const size_t kc = state.range(2);
@@ -104,7 +98,11 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["OPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -145,11 +143,6 @@
 
 static void GemmlowpBenchmark(benchmark::State& state, uint32_t threads)
 {
-  if (!cpuinfo_initialize()) {
-    state.SkipWithError("cpuinfo initialization failed");
-    return;
-  }
-
   const size_t mc = state.range(0);
   const size_t nc = state.range(1);
   const size_t kc = state.range(2);
@@ -194,7 +187,11 @@
         &threadingContext, AM, BM, &CM, 127, 127, outputPipeline);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["OPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -284,7 +281,11 @@
     ruy::Mul(ruy_a, ruy_b, mul_params, &context, &ruy_c);
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
+
   state.counters["OPS"] = benchmark::Counter(
     uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
 }
@@ -298,11 +299,11 @@
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
   static void qu8_gemm_4x8__neon(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_qu8_gemm_minmax_ukernel_4x8__neon, 4, 8, 1, 1);
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_ukernel_4x8__neon, 4, 8, 1, 1, benchmark::utils::CheckNEON);
   }
 
   static void qu8_gemm_8x8__neon(benchmark::State& state, const char* net) {
-    GEMMBenchmark(state, xnn_qu8_gemm_minmax_ukernel_8x8__neon, 8, 8, 1, 1);
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_ukernel_8x8__neon, 8, 8, 1, 1, benchmark::utils::CheckNEON);
   }
 
   BENCHMARK_GEMM(qu8_gemm_4x8__neon)
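The new isa_check parameter is only declared and forwarded in the hunks shown here; the code in GEMMBenchmark that consumes it lies outside this diff. Presumably it follows the same early-return pattern as eval/f32-exp-ulp.cc added later in this change, with the check function (e.g. benchmark::utils::CheckNEON) reporting the skip reason itself. A minimal sketch under that assumption (the helper name SkipIfIsaUnsupported is invented):

#include <benchmark/benchmark.h>

#include "bench/utils.h"

// Returns true when the benchmark should be skipped. A null check means the
// kernel has no ISA requirement beyond the baseline; otherwise the check
// function calls state.SkipWithError() on unsupported CPUs and returns false.
static bool SkipIfIsaUnsupported(benchmark::State& state,
                                 benchmark::utils::IsaCheckFunction isa_check) {
  return isa_check != nullptr && !isa_check(state);
}

GEMMBenchmark would then begin with: if (SkipIfIsaUnsupported(state, isa_check)) return;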
diff --git a/bench/qu8-requantization.cc b/bench/qu8-requantization.cc
index 2353484..7b11727 100644
--- a/bench/qu8-requantization.cc
+++ b/bench/qu8-requantization.cc
@@ -116,22 +116,6 @@
   }
 }
 
-#if !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
-  BENCHMARK_F(Requantization, precise__psimd)(benchmark::State& state) {
-    for (auto _ : state) {
-      xnn_qu8_requantize_precise__psimd(
-          n(), input(), 0x1.0p-12f /* scale */, 128 /* zero point */, 1 /* qmin */, 254 /* qmax */, output());
-    }
-  }
-
-  BENCHMARK_F(Requantization, fp32__psimd)(benchmark::State& state) {
-    for (auto _ : state) {
-      xnn_qu8_requantize_fp32__psimd(
-          n(), input(), 0x1.0p-12f /* scale */, 128 /* zero point */, 1 /* qmin */, 254 /* qmax */, output());
-    }
-  }
-#endif  // !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
-
 
 #if XNN_ARCH_WASMSIMD
   BENCHMARK_F(Requantization, fp32__wasmsimd)(benchmark::State& state) {
diff --git a/bench/sigmoid.cc b/bench/sigmoid.cc
index 439c30a..7c997bd 100644
--- a/bench/sigmoid.cc
+++ b/bench/sigmoid.cc
@@ -1,10 +1,13 @@
 // Copyright (c) Facebook, Inc. and its affiliates.
 // All rights reserved.
 //
+// Copyright 2020 Google LLC
+//
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
 #include <algorithm>
+#include <array>
 #include <cmath>
 #include <functional>
 #include <limits>
@@ -24,6 +27,7 @@
 #include "tensorflow/lite/version.h"
 #endif  // BENCHMARK_TENSORFLOW_LITE
 
+
 #ifndef XNN_NO_QU8_OPERATORS
 static void xnnpack_sigmoid_qu8(benchmark::State& state) {
   const size_t batch_size = state.range(0);
@@ -31,7 +35,8 @@
 
   std::random_device random_device;
   auto rng = std::mt19937(random_device());
-  auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), std::ref(rng));
+  auto u8rng = std::bind(
+    std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), std::ref(rng));
 
   std::vector<uint8_t> input(batch_size * channels);
   std::vector<uint8_t> output(batch_size * channels);
@@ -80,7 +85,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -144,7 +152,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -165,60 +176,58 @@
   auto f32rng = std::bind(std::uniform_real_distribution<float>(-10.0f, 10.0f), std::ref(rng));
 
   flatbuffers::FlatBufferBuilder builder;
-  flatbuffers::Offset<tflite::OperatorCode> operator_code =
+  const flatbuffers::Offset<tflite::OperatorCode> operator_code =
       CreateOperatorCode(builder, tflite::BuiltinOperator_LOGISTIC);
 
-  flatbuffers::Offset<tflite::Buffer> buffers[1] = {
+  const std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers{{
     tflite::CreateBuffer(builder, builder.CreateVector({})),
-  };
+  }};
 
-  const int32_t input_shape[4] = {
+  const std::array<int32_t, 4> input_shape{{
     static_cast<int32_t>(batch_size),
     static_cast<int32_t>(1 /* height */),
     static_cast<int32_t>(1 /* width */),
     static_cast<int32_t>(channels)
-  };
-  const int32_t output_shape[4] = {
+  }};
+  const std::array<int32_t, 4> output_shape{{
     static_cast<int32_t>(batch_size),
     static_cast<int32_t>(1 /* height */),
     static_cast<int32_t>(1 /* width */),
     static_cast<int32_t>(channels)
-  };
+  }};
 
-  flatbuffers::Offset<tflite::Tensor> tensors[2] = {
+  const std::array<flatbuffers::Offset<tflite::Tensor>, 2> tensors{{
     tflite::CreateTensor(builder,
-                         builder.CreateVector<int32_t>(input_shape, 4),
+                         builder.CreateVector<int32_t>(input_shape.data(), input_shape.size()),
                          tflite::TensorType_FLOAT32),
     tflite::CreateTensor(builder,
-                         builder.CreateVector<int32_t>(output_shape, 4),
+                         builder.CreateVector<int32_t>(output_shape.data(), output_shape.size()),
                          tflite::TensorType_FLOAT32),
-  };
+  }};
 
-  const int32_t op_inputs[1] = { 0 };
-  const int32_t op_outputs[1] = { 1 };
+  const std::array<int32_t, 1> op_inputs{{ 0 }};
+  const std::array<int32_t, 1> op_outputs{{ 1 }};
   flatbuffers::Offset<tflite::Operator> op = tflite::CreateOperator(
       builder,
       0 /* opcode_index */,
-      builder.CreateVector<int32_t>(op_inputs, 1),
-      builder.CreateVector<int32_t>(op_outputs, 1));
+      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
+      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()));
 
-  const int32_t graph_inputs[1] = { 0 };
-  const int32_t graph_outputs[1] = { 1 };
-  flatbuffers::Offset<tflite::SubGraph> subgraph = tflite::CreateSubGraph(
+  const std::array<int32_t, 1> graph_inputs{{ 0 }};
+  const std::array<int32_t, 1> graph_outputs{{ 1 }};
+  const flatbuffers::Offset<tflite::SubGraph> subgraph = tflite::CreateSubGraph(
       builder,
-      builder.CreateVector(tensors, 2),
-      builder.CreateVector<int32_t>(graph_inputs, 1),
-      builder.CreateVector<int32_t>(graph_outputs, 1),
+      builder.CreateVector(tensors.data(), tensors.size()),
+      builder.CreateVector<int32_t>(graph_inputs.data(), graph_inputs.size()),
+      builder.CreateVector<int32_t>(graph_outputs.data(), graph_outputs.size()),
       builder.CreateVector(&op, 1));
 
-  flatbuffers::Offset<flatbuffers::String> description = builder.CreateString("Sigmoid model");
-
-  flatbuffers::Offset<tflite::Model> model_buffer = tflite::CreateModel(builder,
+  const flatbuffers::Offset<tflite::Model> model_buffer = tflite::CreateModel(builder,
       TFLITE_SCHEMA_VERSION,
       builder.CreateVector(&operator_code, 1),
       builder.CreateVector(&subgraph, 1),
-      description,
-      builder.CreateVector(buffers, 1));
+      builder.CreateString("Sigmoid model"),
+      builder.CreateVector(buffers.data(), buffers.size()));
 
   builder.Finish(model_buffer);
 
@@ -253,7 +262,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
diff --git a/bench/softmax.cc b/bench/softmax.cc
index cfe1430..d77e860 100644
--- a/bench/softmax.cc
+++ b/bench/softmax.cc
@@ -78,7 +78,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -141,7 +144,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -255,7 +261,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
diff --git a/bench/spmm.h b/bench/spmm.h
new file mode 100644
index 0000000..783fc0e
--- /dev/null
+++ b/bench/spmm.h
@@ -0,0 +1,385 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#pragma once
+
+#include <benchmark/benchmark.h>
+
+#define BENCHMARK_SPMM(spmm_fn) \
+  BENCHMARK_CAPTURE(spmm_fn, mobilenet_v1, "MobileNet v1")->Apply(MobileNetV1SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, mobilenet_v2, "MobileNet v2")->Apply(MobileNetV2SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, mobilenet_v3_small, "MobileNet v3 Small")->Apply(MobileNetV3SmallSpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, mobilenet_v3_large, "MobileNet v3 Large")->Apply(MobileNetV3LargeSpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g1, "ShuffleNet v1 (1 group)")->Apply(ShuffleNetV1G1SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g2, "ShuffleNet v1 (2 groups)")->Apply(ShuffleNetV1G2SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g3, "ShuffleNet v1 (3 groups)")->Apply(ShuffleNetV1G3SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g4, "ShuffleNet v1 (4 groups)")->Apply(ShuffleNetV1G4SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g8, "ShuffleNet v1 (8 groups)")->Apply(ShuffleNetV1G8SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v2_x05, "ShuffleNet v2 0.5X")->Apply(ShuffleNetV2X05SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v2_x10, "ShuffleNet v2 1.0X")->Apply(ShuffleNetV2X10SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v2_x15, "ShuffleNet v2 1.5X")->Apply(ShuffleNetV2X15SpmmArguments)->UseRealTime(); \
+  BENCHMARK_CAPTURE(spmm_fn, shufflenet_v2_x20, "ShuffleNet v2 2.0X")->Apply(ShuffleNetV2X20SpmmArguments)->UseRealTime();
+
+
+// ShuffleNet v1 with 1 group.
+static void ShuffleNetV1G1SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M      N    K */
+  b->Args({56 * 56,  36,  24});
+  b->Args({28 * 28, 120,  36});
+  b->Args({28 * 28,  36, 144});
+  b->Args({28 * 28, 144,  36});
+  b->Args({28 * 28,  72, 144});
+  b->Args({14 * 14, 144,  72});
+  b->Args({14 * 14,  72, 288});
+  b->Args({14 * 14, 288,  72});
+  b->Args({14 * 14, 144, 288});
+  b->Args({ 7 *  7, 288, 144});
+  b->Args({ 7 *  7, 144, 576});
+  b->Args({ 7 *  7, 576, 144});
+}
+
+// ShuffleNet v1 with 2 groups.
+static void ShuffleNetV1G2SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M      N    K */
+  b->Args({56 * 56,  50,  24});
+  b->Args({28 * 28,  88,  25});
+  b->Args({28 * 28,  25, 100});
+  b->Args({28 * 28, 100,  25});
+  b->Args({28 * 28,  50, 100});
+  b->Args({14 * 14, 100,  50});
+  b->Args({14 * 14,  50, 200});
+  b->Args({14 * 14, 200,  50});
+  b->Args({14 * 14, 100, 200});
+  b->Args({ 7 *  7, 200, 100});
+  b->Args({ 7 *  7, 100, 400});
+  b->Args({ 7 *  7, 400, 100});
+}
+
+// ShuffleNet v1 with 3 groups.
+static void ShuffleNetV1G3SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M      N    K */
+  b->Args({56 * 56,  60,  24});
+  b->Args({28 * 28,  72,  20});
+  b->Args({28 * 28,  20,  80});
+  b->Args({28 * 28,  80,  20});
+  b->Args({28 * 28,  40,  80});
+  b->Args({14 * 14,  80,  40});
+  b->Args({14 * 14,  40, 160});
+  b->Args({14 * 14, 160,  40});
+  b->Args({14 * 14,  80, 160});
+  b->Args({ 7 *  7, 160,  80});
+  b->Args({ 7 *  7,  80, 320});
+  b->Args({ 7 *  7, 320,  80});
+}
+
+// ShuffleNet v1 with 4 groups.
+static void ShuffleNetV1G4SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M      N    K */
+  b->Args({56 * 56,  68,  24});
+  b->Args({28 * 28,  62,  17});
+  b->Args({28 * 28,  17,  68});
+  b->Args({28 * 28,  68,  17});
+  b->Args({28 * 28,  34,  68});
+  b->Args({14 * 14,  68,  34});
+  b->Args({14 * 14,  34, 136});
+  b->Args({14 * 14, 136,  34});
+  b->Args({14 * 14,  68, 136});
+  b->Args({ 7 *  7, 136,  68});
+  b->Args({ 7 *  7,  68, 272});
+  b->Args({ 7 *  7, 272,  68});
+}
+
+// ShuffleNet v1 with 8 groups.
+static void ShuffleNetV1G8SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M      N    K */
+  b->Args({56 * 56,  96,  24});
+  b->Args({28 * 28,  45,  12});
+  b->Args({28 * 28,  12,  48});
+  b->Args({28 * 28,  48,  12});
+  b->Args({28 * 28,  24,  48});
+  b->Args({14 * 14,  48,  24});
+  b->Args({14 * 14,  24,  96});
+  b->Args({14 * 14,  96,  24});
+  b->Args({14 * 14,  48,  96});
+  b->Args({ 7 *  7,  96,  48});
+  b->Args({ 7 *  7,  48, 192});
+  b->Args({ 7 *  7, 192,  48});
+}
+
+// ShuffleNet v2 (0.5X scale)
+static void ShuffleNetV2X05SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M       N    K */
+  b->Args({56 * 56,   24,  24});
+  b->Args({28 * 28,   24,  24});
+  b->Args({28 * 28,   48,  48});
+  b->Args({14 * 14,   48,  48});
+  b->Args({14 * 14,   96,  96});
+  b->Args({ 7 *  7,   96,  96});
+  b->Args({ 7 *  7, 1024, 192});
+}
+
+// ShuffleNet v2 (1.0X scale)
+static void ShuffleNetV2X10SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M       N    K */
+  b->Args({56 * 56,   58,  24});
+  b->Args({28 * 28,   58,  24});
+  b->Args({28 * 28,   58,  58});
+  b->Args({14 * 14,  116, 116});
+  b->Args({14 * 14,  116, 116});
+  b->Args({14 * 14,  232, 232});
+  b->Args({ 7 *  7,  232, 232});
+  b->Args({ 7 *  7, 1024, 464});
+}
+
+// ShuffleNet v2 (1.5X scale)
+static void ShuffleNetV2X15SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M       N    K */
+  b->Args({56 * 56,   88,  24});
+  b->Args({28 * 28,   88,  24});
+  b->Args({28 * 28,   88,  88});
+  b->Args({28 * 28,  176, 176});
+  b->Args({14 * 14,  176, 176});
+  b->Args({14 * 14,  352, 352});
+  b->Args({ 7 *  7,  352, 352});
+  b->Args({ 7 *  7, 1024, 704});
+}
+
+// ShuffleNet v2 (2.0X scale)
+static void ShuffleNetV2X20SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*          M       N    K */
+  b->Args({56 * 56,  122,  24});
+  b->Args({28 * 28,  122,  24});
+  b->Args({28 * 28,  122, 122});
+  b->Args({28 * 28,  244, 244});
+  b->Args({14 * 14,  244, 244});
+  b->Args({14 * 14,  488, 488});
+  b->Args({ 7 *  7,  488, 488});
+  b->Args({ 7 *  7, 2048, 976});
+}
+
+static void MobileNetV1SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /*           M        N     K */
+  b->Args({112 * 112,   64,   32});
+  b->Args({ 56 *  56,  128,   64});
+  b->Args({ 56 *  56,  128,  128});
+  b->Args({ 28 *  28,  256,  128});
+  b->Args({ 28 *  28,  256,  256});
+  b->Args({ 14 *  14,  512,  256});
+  b->Args({ 14 *  14,  512,  512});
+  b->Args({  7 *   7, 1024,  512});
+  b->Args({  7 *   7, 1024, 1024});
+}
+
+static void MobileNetV2SpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /******** Bottleneck 1 *******/
+  /*           M        N    K */
+  b->Args({112 * 112,   16,  32});
+  /******** Bottleneck 2 *******/
+  /*           M        N    K */
+  b->Args({112 * 112,   96,  16});
+  b->Args({ 56 *  56,   24,  96});
+  b->Args({ 56 *  56,  144,  24});
+  b->Args({ 56 *  56,   24, 144});
+  /******** Bottleneck 3 *******/
+  /*           M        N    K */
+  b->Args({ 28 *  28,   32, 144});
+  b->Args({ 28 *  28,  192,  32});
+  b->Args({ 28 *  28,   32, 192});
+  /******** Bottleneck 4 *******/
+  /*           M        N    K */
+  b->Args({ 14 *  14,   64, 192});
+  b->Args({ 14 *  14,  384,  64});
+  b->Args({ 14 *  14,   64, 384});
+  /******** Bottleneck 5 *******/
+  /*           M        N    K */
+  b->Args({ 14 *  14,   96, 384});
+  b->Args({ 14 *  14,  576,  96});
+  b->Args({ 14 *  14,   96, 576});
+  /******** Bottleneck 6 *******/
+  /*           M        N    K */
+  b->Args({  7 *   7,  160, 576});
+  b->Args({  7 *   7,  960, 160});
+  b->Args({  7 *   7,  160, 960});
+  /******** Bottleneck 7 *******/
+  /*           M        N    K */
+  b->Args({  7 *   7,  320, 960});
+  /***** Pre-pooling Conv2D ****/
+  /*           M        N    K */
+  b->Args({  7 *   7, 1280, 320});
+}
+
+static void MobileNetV3SmallSpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /****** Bottleneck 1 ******/
+  /*          M      N    K */
+  b->Args({ 1 *  1,   8,  16});
+  b->Args({ 1 *  1,  16,   8});
+  b->Args({56 * 56,  16,  16});
+  /****** Bottleneck 2 ******/
+  /*          M      N    K */
+  b->Args({56 * 56,  72,  16});
+  b->Args({28 * 28,  24,  72});
+  /****** Bottleneck 3 ******/
+  /*          M      N    K */
+  b->Args({28 * 28,  88,  24});
+  b->Args({28 * 28,  24,  88});
+  /****** Bottleneck 4 ******/
+  /*          M      N    K */
+  b->Args({28 * 28,  96,  24});
+  b->Args({ 1 *  1,  24,  96});
+  b->Args({ 1 *  1,  96,  24});
+  b->Args({14 * 14,  40,  96});
+  /****** Bottleneck 5 ******/
+  /*          M      N    K */
+  b->Args({14 * 14, 240,  40});
+  b->Args({ 1 *  1,  64, 240});
+  b->Args({ 1 *  1, 240,  64});
+  b->Args({14 * 14,  40, 240});
+  /****** Bottleneck 6 ******/
+  /*          M      N    K */
+//b->Args({14 * 14, 240,  40});
+//b->Args({ 1 *  1,  64, 240});
+//b->Args({ 1 *  1, 240,  64});
+//b->Args({14 * 14,  40, 240});
+  /****** Bottleneck 7 ******/
+  /*          M      N    K */
+  b->Args({14 * 14, 120,  40});
+  b->Args({ 1 *  1,  32, 120});
+  b->Args({ 1 *  1, 120,  32});
+  b->Args({14 * 14,  48, 120});
+  /****** Bottleneck 8 ******/
+  /*          M      N    K */
+  b->Args({14 * 14, 144,  48});
+  b->Args({ 1 *  1,  40, 144});
+  b->Args({ 1 *  1, 144,  40});
+  b->Args({14 * 14,  48, 144});
+  /****** Bottleneck 9 ******/
+  /*          M      N    K */
+  b->Args({14 * 14, 288,  48});
+  b->Args({ 1 *  1,  72, 288});
+  b->Args({ 1 *  1, 288,  72});
+  b->Args({ 7 *  7,  96, 288});
+  /****** Bottleneck 10 *****/
+  /*          M      N     K */
+  b->Args({ 7 *  7, 576,  96});
+  b->Args({ 1 *  1, 144, 576});
+  b->Args({ 1 *  1, 576, 144});
+  b->Args({ 7 *  7,  96, 576});
+  /****** Bottleneck 11 *****/
+  /*          M      N    K */
+//b->Args({ 7 *  7, 576,  96});
+//b->Args({ 1 *  1, 144, 576});
+//b->Args({ 1 *  1, 576, 144});
+//b->Args({ 7 *  7,  96, 576});
+  /******* Last Stage *******/
+  /*          M      N    K */
+//b->Args({ 7 *  7, 576,  96});
+}
+
+static void MobileNetV3LargeSpmmArguments(benchmark::internal::Benchmark* b) {
+  b->ArgNames({"M", "N", "K"});
+
+  /******* Bottleneck 1 *******/
+  /*           M       N    K */
+  b->Args({112 * 112,  16,  16});
+  /******* Bottleneck 2 *******/
+  /*           M       N    K */
+  b->Args({112 * 112,  64,  16});
+  b->Args({ 56 *  56,  24,  64});
+  /******* Bottleneck 3 *******/
+  /*           M       N    K */
+  b->Args({ 56 *  56,  72,  24});
+  b->Args({ 56 *  56,  24,  72});
+  /******* Bottleneck 4 *******/
+  /*           M       N    K */
+//b->Args({ 56 *  56,  72,  24});
+  b->Args({  1 *   1,  24,  72});
+  b->Args({  1 *   1,  72,  24});
+  b->Args({ 28 *  28,  40,  72});
+  /******* Bottleneck 5 *******/
+  /*           M       N    K */
+  b->Args({ 28 *  28, 120,  40});
+  b->Args({  1 *   1,  32, 120});
+  b->Args({  1 *   1, 120,  32});
+  b->Args({ 28 *  28,  40, 120});
+  /******* Bottleneck 6 *******/
+  /*           M       N    K */
+//b->Args({ 28 *  28, 120,  40});
+//b->Args({  1 *   1,  32, 120});
+//b->Args({  1 *   1, 120,  32});
+//b->Args({ 28 *  28,  40, 120});
+  /******* Bottleneck 7 *******/
+  /*           M       N    K */
+  b->Args({ 28 *  28, 240,  40});
+  b->Args({ 14 *  14,  80, 240});
+  /******* Bottleneck 8 *******/
+  /*           M       N    K */
+  b->Args({ 14 *  14, 200,  80});
+  b->Args({ 14 *  14,  80, 200});
+  /******* Bottleneck 9 *******/
+  /*           M       N    K */
+  b->Args({ 14 *  14, 184,  80});
+  b->Args({ 14 *  14,  80, 184});
+  /******* Bottleneck 10 ******/
+  /*           M       N    K */
+  b->Args({ 14 *  14, 184,  80});
+  b->Args({ 14 *  14,  80, 184});
+  /******* Bottleneck 11 ******/
+  /*           M       N    K */
+  b->Args({ 14 *  14, 480,  80});
+  b->Args({  1 *   1, 120, 480});
+  b->Args({  1 *   1, 480, 120});
+  b->Args({ 14 *  14, 112, 480});
+  /******* Bottleneck 12 ******/
+  /*           M       N    K */
+  b->Args({ 14 *  14, 672, 112});
+  b->Args({  1 *   1, 168, 672});
+  b->Args({  1 *   1, 672, 168});
+  b->Args({ 14 *  14, 112, 672});
+  /******* Bottleneck 13 ******/
+  /*           M       N    K */
+//b->Args({ 14 *  14, 672, 112});
+//b->Args({  1 *   1, 168, 672});
+//b->Args({  1 *   1, 672, 168});
+  b->Args({  7 *   7, 160, 672});
+  /******* Bottleneck 14 ******/
+  /*           M       N    K */
+  b->Args({  7 *   7, 960, 160});
+  b->Args({  1 *   1, 240, 960});
+  b->Args({  1 *   1, 960, 240});
+  b->Args({  7 *   7, 160, 960});
+  /******* Bottleneck 15 ******/
+  /*           M       N    K */
+//b->Args({  7 *   7, 960, 160});
+//b->Args({  1 *   1, 240, 960});
+//b->Args({  1 *   1, 960, 240});
+//b->Args({  7 *   7, 160, 960});
+  /******** Last Stage  *******/
+  /*           M       N    K */
+//b->Args({  7 *   7, 960, 160});
+}
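bench/spmm.h only defines the shape arguments and the BENCHMARK_SPMM registration macro; the benchmark bodies live in separate translation units. From the macro above, the registered function is expected to take (benchmark::State&, const char*) and read M, N, K from state.range(0)..state.range(2). A hypothetical skeleton of such a user (the function name and the elided kernel call are placeholders):

#include <cstddef>

#include <benchmark/benchmark.h>

#include "bench/spmm.h"

static void spmm_4x1__neon(benchmark::State& state, const char* net) {
  const size_t mc = state.range(0);  // M: output pixels
  const size_t nc = state.range(1);  // N: output channels
  const size_t kc = state.range(2);  // K: input channels
  for (auto _ : state) {
    // ... run the sparse-weights matrix multiplication micro-kernel here ...
  }
  (void) net;  // network name, used only to label the benchmark
  (void) mc; (void) nc; (void) kc;
}
BENCHMARK_SPMM(spmm_4x1__neon)

#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif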
diff --git a/bench/square-root.cc b/bench/square-root.cc
index 693dcb2..3c1f10b 100644
--- a/bench/square-root.cc
+++ b/bench/square-root.cc
@@ -77,7 +77,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
@@ -184,7 +187,10 @@
     }
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
diff --git a/bench/truncation.cc b/bench/truncation.cc
index b87dc05..8d63645 100644
--- a/bench/truncation.cc
+++ b/bench/truncation.cc
@@ -68,7 +68,10 @@
     return;
   }
 
-  state.counters["Freq"] = benchmark::utils::GetCurrentCpuFrequency();
+  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
+  if (cpu_frequency != 0) {
+    state.counters["cpufreq"] = cpu_frequency;
+  }
 
   const size_t elements_per_iteration = batch_size * channels;
   state.counters["elements"] =
diff --git a/build_defs.bzl b/build_defs.bzl
index b6bec17..1ce9482 100644
--- a/build_defs.bzl
+++ b/build_defs.bzl
@@ -69,7 +69,6 @@
 def xnnpack_cc_library(
         name,
         srcs = [],
-        psimd_srcs = [],
         x86_srcs = [],
         aarch32_srcs = [],
         aarch64_srcs = [],
@@ -102,7 +101,6 @@
     Args:
       name: The name of the library target to define.
       srcs: The list of architecture-independent source files.
-      psimd_srcs: The list of psimd-specific source files.
       x86_srcs: The list of x86-specific source files.
       aarch32_srcs: The list of AArch32-specific source files.
       aarch64_srcs: The list of AArch64-specific source files.
@@ -141,34 +139,34 @@
     native.cc_library(
         name = name,
         srcs = srcs + select({
-            ":linux_k8": psimd_srcs + x86_srcs,
-            ":linux_arm": psimd_srcs + aarch32_srcs,
-            ":linux_armeabi": psimd_srcs + aarch32_srcs,
-            ":linux_armhf": psimd_srcs + aarch32_srcs,
-            ":linux_armv7a": psimd_srcs + aarch32_srcs,
-            ":linux_aarch64": psimd_srcs + aarch64_srcs,
-            ":macos_x86_64": psimd_srcs + x86_srcs,
-            ":windows_x86_64_clang": psimd_srcs + x86_srcs,
-            ":windows_x86_64_mingw": psimd_srcs + x86_srcs,
-            ":windows_x86_64_msys": psimd_srcs + x86_srcs,
+            ":linux_k8": x86_srcs,
+            ":linux_arm": aarch32_srcs,
+            ":linux_armeabi": aarch32_srcs,
+            ":linux_armhf": aarch32_srcs,
+            ":linux_armv7a": aarch32_srcs,
+            ":linux_aarch64": aarch64_srcs,
+            ":macos_x86_64": x86_srcs,
+            ":windows_x86_64_clang": x86_srcs,
+            ":windows_x86_64_mingw": x86_srcs,
+            ":windows_x86_64_msys": x86_srcs,
             ":windows_x86_64": x86_srcs,
-            ":android_armv7": psimd_srcs + aarch32_srcs,
-            ":android_arm64": psimd_srcs + aarch64_srcs,
-            ":android_x86": psimd_srcs + x86_srcs,
-            ":android_x86_64": psimd_srcs + x86_srcs,
-            ":ios_armv7": psimd_srcs + aarch32_srcs,
-            ":ios_arm64": psimd_srcs + aarch64_srcs,
-            ":ios_arm64e": psimd_srcs + aarch64_srcs,
-            ":ios_x86": psimd_srcs + x86_srcs,
-            ":ios_x86_64": psimd_srcs + x86_srcs,
-            ":watchos_armv7k": psimd_srcs + aarch32_srcs,
-            ":watchos_arm64_32": psimd_srcs + aarch64_srcs,
-            ":watchos_x86": psimd_srcs + x86_srcs,
-            ":watchos_x86_64": psimd_srcs + x86_srcs,
-            ":tvos_arm64": psimd_srcs + aarch64_srcs,
-            ":tvos_x86_64": psimd_srcs + x86_srcs,
+            ":android_armv7": aarch32_srcs,
+            ":android_arm64": aarch64_srcs,
+            ":android_x86": x86_srcs,
+            ":android_x86_64": x86_srcs,
+            ":ios_armv7": aarch32_srcs,
+            ":ios_arm64": aarch64_srcs,
+            ":ios_arm64e": aarch64_srcs,
+            ":ios_x86": x86_srcs,
+            ":ios_x86_64": x86_srcs,
+            ":watchos_armv7k": aarch32_srcs,
+            ":watchos_arm64_32": aarch64_srcs,
+            ":watchos_x86": x86_srcs,
+            ":watchos_x86_64": x86_srcs,
+            ":tvos_arm64": aarch64_srcs,
+            ":tvos_x86_64": x86_srcs,
             ":emscripten_wasm": wasm_srcs,
-            ":emscripten_wasmsimd": psimd_srcs + wasmsimd_srcs,
+            ":emscripten_wasmsimd": wasmsimd_srcs,
             "//conditions:default": [],
         }),
         copts = [
@@ -235,7 +233,6 @@
 def xnnpack_aggregate_library(
         name,
         generic_deps = [],
-        psimd_deps = [],
         x86_deps = [],
         aarch32_deps = [],
         aarch64_deps = [],
@@ -246,7 +243,6 @@
     Args:
       name: The name of the library target to define.
       generic_deps: The list of libraries to link on all architectures.
-      psimd_deps: The list of libraries to link in psimd-enabled builds.
       x86_deps: The list of libraries to link in x86 and x86-64 builds.
       aarch32_deps: The list of libraries to link in AArch32 builds.
       aarch64_deps: The list of libraries to link in AArch64 builds.
@@ -258,38 +254,38 @@
         name = name,
         linkstatic = True,
         deps = generic_deps + select({
-            ":linux_k8": psimd_deps + x86_deps,
-            ":linux_arm": psimd_deps + aarch32_deps,
-            ":linux_armeabi": psimd_deps + aarch32_deps,
-            ":linux_armhf": psimd_deps + aarch32_deps,
-            ":linux_armv7a": psimd_deps + aarch32_deps,
-            ":linux_aarch64": psimd_deps + aarch64_deps,
-            ":macos_x86_64": psimd_deps + x86_deps,
-            ":windows_x86_64_clang": psimd_deps + x86_deps,
-            ":windows_x86_64_mingw": psimd_deps + x86_deps,
-            ":windows_x86_64_msys": psimd_deps + x86_deps,
+            ":linux_k8": x86_deps,
+            ":linux_arm": aarch32_deps,
+            ":linux_armeabi": aarch32_deps,
+            ":linux_armhf": aarch32_deps,
+            ":linux_armv7a": aarch32_deps,
+            ":linux_aarch64": aarch64_deps,
+            ":macos_x86_64": x86_deps,
+            ":windows_x86_64_clang": x86_deps,
+            ":windows_x86_64_mingw": x86_deps,
+            ":windows_x86_64_msys": x86_deps,
             ":windows_x86_64": x86_deps,
-            ":android_armv7": psimd_deps + aarch32_deps,
-            ":android_arm64": psimd_deps + aarch64_deps,
-            ":android_x86": psimd_deps + x86_deps,
-            ":android_x86_64": psimd_deps + x86_deps,
-            ":ios_armv7": psimd_deps + aarch32_deps,
-            ":ios_arm64": psimd_deps + aarch64_deps,
-            ":ios_arm64e": psimd_deps + aarch64_deps,
-            ":ios_x86": psimd_deps + x86_deps,
-            ":ios_x86_64": psimd_deps + x86_deps,
-            ":watchos_armv7k": psimd_deps + aarch32_deps,
-            ":watchos_arm64_32": psimd_deps + aarch64_deps,
-            ":watchos_x86": psimd_deps + x86_deps,
-            ":watchos_x86_64": psimd_deps + x86_deps,
-            ":tvos_arm64": psimd_deps + aarch64_deps,
-            ":tvos_x86_64": psimd_deps + x86_deps,
+            ":android_armv7": aarch32_deps,
+            ":android_arm64": aarch64_deps,
+            ":android_x86": x86_deps,
+            ":android_x86_64": x86_deps,
+            ":ios_armv7": aarch32_deps,
+            ":ios_arm64": aarch64_deps,
+            ":ios_arm64e": aarch64_deps,
+            ":ios_x86": x86_deps,
+            ":ios_x86_64": x86_deps,
+            ":watchos_armv7k": aarch32_deps,
+            ":watchos_arm64_32": aarch64_deps,
+            ":watchos_x86": x86_deps,
+            ":watchos_x86_64": x86_deps,
+            ":tvos_arm64": aarch64_deps,
+            ":tvos_x86_64": x86_deps,
             ":emscripten_wasm": wasm_deps,
-            ":emscripten_wasmsimd": psimd_deps + wasmsimd_deps,
+            ":emscripten_wasmsimd": wasmsimd_deps,
         }),
     )
 
-def xnnpack_unit_test(name, srcs, copts = [], mingw_copts = [], msys_copts = [], deps = [], tags = [], automatic = True):
+def xnnpack_unit_test(name, srcs, copts = [], mingw_copts = [], msys_copts = [], deps = [], tags = [], automatic = True, timeout = "short"):
     """Unit test binary based on Google Test.
 
     Args:
@@ -303,6 +299,9 @@
       deps: The list of additional libraries to be linked. Google Test library
             (with main() function) is always added as a dependency and does not
             need to be explicitly specified.
+      tags: List of arbitrary text tags.
+      automatic: Whether to create the test or a testable binary.
+      timeout: How long the test is expected to run before returning.
     """
 
     if automatic:
@@ -335,6 +334,7 @@
                 "//conditions:default": [],
             }),
             tags = tags,
+            timeout = timeout,
         )
     else:
         native.cc_binary(
diff --git a/cmake/DownloadCpuinfo.cmake b/cmake/DownloadCpuinfo.cmake
index 49e77a7..a274c14 100644
--- a/cmake/DownloadCpuinfo.cmake
+++ b/cmake/DownloadCpuinfo.cmake
@@ -12,12 +12,12 @@
 
 INCLUDE(ExternalProject)
 ExternalProject_Add(cpuinfo
-  URL https://github.com/pytorch/cpuinfo/archive/6cecd15784fcb6c5c0aa7311c6248879ce2cb8b2.zip
-  URL_HASH SHA256=b1f2ee97e46d8917a66bcb47452fc510d511829556c93b83e06841b9b35261a5
+  URL https://github.com/pytorch/cpuinfo/archive/5916273f79a21551890fd3d56fc5375a78d1598d.zip
+  URL_HASH SHA256=2a160c527d3c58085ce260f34f9e2b161adc009b34186a2baf24e74376e89e6d
   SOURCE_DIR "${CMAKE_BINARY_DIR}/cpuinfo-source"
   BINARY_DIR "${CMAKE_BINARY_DIR}/cpuinfo"
   CONFIGURE_COMMAND ""
-  PATCH_COMMAND patch -p0 -i "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cpuinfo.patch"
+  PATCH_COMMAND ""
   BUILD_COMMAND ""
   INSTALL_COMMAND ""
   TEST_COMMAND ""
diff --git a/cmake/DownloadGoogleTest.cmake b/cmake/DownloadGoogleTest.cmake
index 0223099..b02d36c 100644
--- a/cmake/DownloadGoogleTest.cmake
+++ b/cmake/DownloadGoogleTest.cmake
@@ -12,8 +12,8 @@
 
 INCLUDE(ExternalProject)
 ExternalProject_Add(googletest
-  URL https://github.com/google/googletest/archive/release-1.10.0.zip
-  URL_HASH SHA256=94c634d499558a76fa649edb13721dce6e98fb1e7018dfaeba3cd7a083945e91
+  URL https://github.com/google/googletest/archive/5a509dbd2e5a6c694116e329c5a20dc190653724.zip
+  URL_HASH SHA256=fcfac631041fce253eba4fc014c28fd620e33e3758f64f8ed5487cc3e1840e3d
   SOURCE_DIR "${CMAKE_BINARY_DIR}/googletest-source"
   BINARY_DIR "${CMAKE_BINARY_DIR}/googletest"
   CONFIGURE_COMMAND ""
diff --git a/cmake/DownloadPSimd.cmake b/cmake/DownloadPSimd.cmake
deleted file mode 100644
index 83badc8..0000000
--- a/cmake/DownloadPSimd.cmake
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# Copyright 2019 Google LLC
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5 FATAL_ERROR)
-
-PROJECT(psimd-download NONE)
-
-INCLUDE(ExternalProject)
-ExternalProject_Add(psimd
-  URL https://github.com/Maratyszcza/psimd/archive/072586a71b55b7f8c584153d223e95687148a900.zip
-  URL_HASH SHA256=dc615342bcbe51ca885323e51b68b90ed9bb9fa7df0f4419dbfa0297d5e837b7
-  SOURCE_DIR "${CMAKE_BINARY_DIR}/psimd-source"
-  BINARY_DIR "${CMAKE_BINARY_DIR}/psimd"
-  CONFIGURE_COMMAND ""
-  BUILD_COMMAND ""
-  INSTALL_COMMAND ""
-  TEST_COMMAND ""
-)
diff --git a/cmake/DownloadPThreadPool.cmake b/cmake/DownloadPThreadPool.cmake
index c5676f9..c425f2e 100644
--- a/cmake/DownloadPThreadPool.cmake
+++ b/cmake/DownloadPThreadPool.cmake
@@ -12,8 +12,8 @@
 
 INCLUDE(ExternalProject)
 ExternalProject_Add(pthreadpool
-  URL https://github.com/Maratyszcza/pthreadpool/archive/029c88620802e1361ccf41d1970bd5b07fd6b7bb.zip
-  URL_HASH SHA256=03312bd7d8d9e379d685258963ee8820767158b5946cdd00336ff17dae851001
+  URL https://github.com/Maratyszcza/pthreadpool/archive/545ebe9f225aec6dca49109516fac02e973a3de2.zip
+  URL_HASH SHA256=8461f6540ae9f777ce20d1c0d1d249e5e61c438744fb390c0c6f91940aa69ea3
   SOURCE_DIR "${CMAKE_BINARY_DIR}/pthreadpool-source"
   BINARY_DIR "${CMAKE_BINARY_DIR}/pthreadpool"
   CONFIGURE_COMMAND ""
diff --git a/cmake/cpuinfo.patch b/cmake/cpuinfo.patch
deleted file mode 100644
index 016a785..0000000
--- a/cmake/cpuinfo.patch
+++ /dev/null
@@ -1,130 +0,0 @@
-diff --git include/cpuinfo.h include/cpuinfo.h
-index 6c67c34..85ce174 100644
---- include/cpuinfo.h
-+++ include/cpuinfo.h
-@@ -417,6 +417,8 @@ enum cpuinfo_uarch {
- 	cpuinfo_uarch_cortex_a76   = 0x00300376,
- 	/** ARM Cortex-A77. */
- 	cpuinfo_uarch_cortex_a77   = 0x00300377,
-+	/** ARM Cortex-A78. */
-+	cpuinfo_uarch_cortex_a78   = 0x00300378,
- 
- 	/** ARM Neoverse N1. */
- 	cpuinfo_uarch_neoverse_n1  = 0x00300400,
-@@ -1434,6 +1436,7 @@ static inline bool cpuinfo_has_x86_sha(void) {
- 			bool armv6k;
- 			bool armv7;
- 			bool armv7mp;
-+			bool armv8;
- 			bool idiv;
- 
- 			bool vfpv2;
-@@ -1521,6 +1524,16 @@ static inline bool cpuinfo_has_arm_v7mp(void) {
- 	#endif
- }
- 
-+static inline bool cpuinfo_has_arm_v8(void) {
-+	#if CPUINFO_ARCH_ARM64
-+		return true;
-+	#elif CPUINFO_ARCH_ARM
-+		return cpuinfo_isa.armv8;
-+	#else
-+		return false;
-+	#endif
-+}
-+
- static inline bool cpuinfo_has_arm_idiv(void) {
- 	#if CPUINFO_ARCH_ARM64
- 		return true;
-@@ -1645,6 +1658,16 @@ static inline bool cpuinfo_has_arm_neon_fma(void) {
- 	#endif
- }
- 
-+static inline bool cpuinfo_has_arm_neon_v8(void) {
-+	#if CPUINFO_ARCH_ARM64
-+		return true;
-+	#elif CPUINFO_ARCH_ARM
-+		return cpuinfo_isa.neon && cpuinfo_isa.armv8;
-+	#else
-+		return false;
-+	#endif
-+}
-+
- static inline bool cpuinfo_has_arm_atomics(void) {
- 	#if CPUINFO_ARCH_ARM64
- 		return cpuinfo_isa.atomics;
-diff --git src/arm/linux/aarch32-isa.c src/arm/linux/aarch32-isa.c
-index 64dd168..41f9972 100644
---- src/arm/linux/aarch32-isa.c
-+++ src/arm/linux/aarch32-isa.c
-@@ -43,6 +43,7 @@ void cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo(
- 		isa->armv6k  = true;
- 		isa->armv7   = true;
- 		isa->armv7mp = true;
-+		isa->armv8   = true;
- 		isa->thumb  = true;
- 		isa->thumb2 = true;
- 		isa->idiv = true;
-diff --git src/arm/mach/init.c src/arm/mach/init.c
-index 058cfc2..e912de6 100644
---- src/arm/mach/init.c
-+++ src/arm/mach/init.c
-@@ -307,6 +307,7 @@ void cpuinfo_arm_mach_init(void) {
- 		case CPU_TYPE_ARM:
- 			switch (cpu_subtype) {
- 				case CPU_SUBTYPE_ARM_V8:
-+					cpuinfo_isa.armv8 = true;
- 					cpuinfo_isa.aes = true;
- 					cpuinfo_isa.sha1 = true;
- 					cpuinfo_isa.sha2 = true;
-diff --git src/arm/midr.h src/arm/midr.h
-index 34d7780..2638517 100644
---- src/arm/midr.h
-+++ src/arm/midr.h
-@@ -183,6 +183,7 @@ inline static uint32_t midr_score_core(uint32_t midr) {
- 		case UINT32_C(0x51008000): /* Kryo 260 / 280 Gold */
- 		case UINT32_C(0x51002050): /* Kryo Gold */
- 		case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */
-+		case UINT32_C(0x4100D410): /* Cortex-A78 */
- 		case UINT32_C(0x4100D0D0): /* Cortex-A77 */
- 		case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
- 		case UINT32_C(0x4100D0B0): /* Cortex-A76 */
-diff --git src/arm/uarch.c src/arm/uarch.c
-index 55b61df..0d7a7d7 100644
---- src/arm/uarch.c
-+++ src/arm/uarch.c
-@@ -91,6 +91,9 @@ void cpuinfo_arm_decode_vendor_uarch(
- 				case 0xD0E: /* Cortex-A76AE */
- 					*uarch = cpuinfo_uarch_cortex_a76;
- 					break;
-+				case 0xD41: /* Cortex-A78 */
-+					*uarch = cpuinfo_uarch_cortex_a78;
-+					break;
- #if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__)
- 				case 0xD4A:
- 					*uarch = cpuinfo_uarch_neoverse_e1;
-diff --git tools/cpu-info.c tools/cpu-info.c
-index 2759068..429bbfa 100644
---- tools/cpu-info.c
-+++ tools/cpu-info.c
-@@ -183,6 +183,8 @@ static const char* uarch_to_string(enum cpuinfo_uarch uarch) {
- 			return "Cortex-A76";
- 		case cpuinfo_uarch_cortex_a77:
- 			return "Cortex-A77";
-+		case cpuinfo_uarch_cortex_a78:
-+			return "Cortex-A78";
- 		case cpuinfo_uarch_scorpion:
- 			return "Scorpion";
- 		case cpuinfo_uarch_krait:
-diff --git tools/isa-info.c tools/isa-info.c
-index 98ef919..8365846 100644
---- tools/isa-info.c
-+++ tools/isa-info.c
-@@ -121,6 +121,7 @@ int main(int argc, char** argv) {
- 		printf("\tARMv6-K: %s\n", cpuinfo_has_arm_v6k() ? "yes" : "no");
- 		printf("\tARMv7: %s\n", cpuinfo_has_arm_v7() ? "yes" : "no");
- 		printf("\tARMv7 MP: %s\n", cpuinfo_has_arm_v7mp() ? "yes" : "no");
-+		printf("\tARMv8: %s\n", cpuinfo_has_arm_v8() ? "yes" : "no");
- 		printf("\tIDIV: %s\n", cpuinfo_has_arm_idiv() ? "yes" : "no");
- 
- 	printf("Floating-Point support:\n");
diff --git a/eval/f32-exp-ulp.cc b/eval/f32-exp-ulp.cc
new file mode 100644
index 0000000..127db08
--- /dev/null
+++ b/eval/f32-exp-ulp.cc
@@ -0,0 +1,212 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <cfloat>
+#include <cmath>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <vector>
+
+#include <cpuinfo.h>
+#include <pthreadpool.h>
+
+#include <benchmark/benchmark.h>
+#include <fp16/fp16.h>
+
+#include "bench/utils.h"
+#include <xnnpack/AlignedAllocator.h>
+#include <xnnpack/common.h>
+#include <xnnpack/math-stubs.h>
+
+
+struct ComputeErrorContext {
+  const float* input;
+  const float* output;
+  float* error;
+};
+
+static void ComputeError(
+  struct ComputeErrorContext* context,
+  size_t start,
+  size_t range)
+{
+  const float* input = context->input;
+  const float* output = context->output;
+  float* error = context->error;
+  for (size_t i = start; i < start + range; i++) {
+    const double output_ref = std::exp(double(input[i]));
+    const double abs_error = std::abs(output_ref - double(output[i]));
+    const float output_abs = std::abs(output_ref);
+    const float output_ulp = fp32_from_bits(fp32_to_bits(output_abs) + 1) - output_abs;
+    error[i] = float(abs_error / output_ulp);
+  }
+}
+
+static void ExpError(
+  benchmark::State& state,
+  xnn_f32_unary_math_function exp,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
+{
+  if (!cpuinfo_initialize()) {
+    state.SkipWithError("cpuinfo initialization failed");
+    return;
+  }
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  // The smallest x for which expf(x) is non-zero (-0x1.9FE368p+6f).
+  const uint32_t min_input = 0xC2CFF1B4;
+  // The largest x for which expf(x) is finite (0x1.62E42Ep+6f).
+  const uint32_t max_input = 0x42B17217;
+  // Number of elements in one block of inputs/outputs.
+  // Combining multiple elements in a block reduces function call overhead.
+  const size_t block_size = 16384;
+  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
+  const size_t tile_size = 64;
+
+  uint32_t num_threads = cpuinfo_get_cores_count();
+  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+    // Use all cores except for the least performant cluster
+    if (cpuinfo_get_clusters_count() > 1) {
+      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
+    }
+  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
+    pthreadpool_create(num_threads), pthreadpool_destroy);
+
+  std::vector<float, AlignedAllocator<float, 64>> x(block_size);
+  std::vector<float, AlignedAllocator<float, 64>> y(block_size);
+  std::vector<float> ulp_error(block_size);
+  float max_ulp_error = 0.0f;
+
+  ComputeErrorContext context;
+  context.input = x.data();
+  context.output = y.data();
+  context.error = ulp_error.data();
+  for (auto _ : state) {
+    for (uint32_t n = min_input; int32_t(n) < 0; n -= block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
+      }
+      std::fill(y.begin(), y.end(), std::nanf(""));
+
+      exp(block_size * sizeof(float), x.data(), y.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+    for (uint32_t n = 0; n < max_input; n += block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::min<uint32_t>(n + i, max_input));
+      }
+      std::fill(y.begin(), y.end(), std::nanf(""));
+
+      exp(block_size * sizeof(float), x.data(), y.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+  }
+
+  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
+}
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  BENCHMARK_CAPTURE(ExpError, neonfma_rr2_lut64_p2,
+                    xnn_math_f32_exp__neonfma_rr2_lut64_p2,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, neonfma_rr2_p5,
+                    xnn_math_f32_exp__neonfma_rr2_p5,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  BENCHMARK_CAPTURE(ExpError, avx512f_rr2_lut16_p3_perm,
+                    xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, avx512f_rr2_lut16_p3_perm_scalef,
+                    xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, avx512f_rr2_lut32_p2_perm2,
+                    xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, avx512f_rr2_lut32_p2_perm2_scalef,
+                    xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, avx512f_rr2_p5,
+                    xnn_math_f32_exp__avx512f_rr2_p5,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, avx512f_rr2_p5_scalef,
+                    xnn_math_f32_exp__avx512f_rr2_p5_scalef,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExpError, avx2_rr2_lut8_p3_perm,
+                    xnn_math_f32_exp__avx2_rr2_lut8_p3_perm,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, avx2_rr2_lut8_p4_perm,
+                    xnn_math_f32_exp__avx2_rr2_lut8_p4_perm,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, avx2_rr2_p5,
+                    xnn_math_f32_exp__avx2_rr2_p5,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExpError, avx_rr2_p5,
+                    xnn_math_f32_exp__avx_rr2_p5,
+                    benchmark::utils::CheckAVX)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExpError, sse2_rr2_lut64_p2,
+                    xnn_math_f32_exp__sse2_rr2_lut64_p2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, sse2_rr2_p5,
+                    xnn_math_f32_exp__sse2_rr2_p5)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#ifndef XNNPACK_BENCHMARK_NO_MAIN
+BENCHMARK_MAIN();
+#endif
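The metric this new harness reports is the ULP error: |exp(x) - y| divided by the spacing between |exp(x)| and the next representable float above it, so a correctly rounded result scores at most 0.5. A stand-alone restatement of the ComputeError logic above (the example value in main is illustrative only, not part of the patch):

#include <cmath>
#include <cstdio>

#include <fp16/fp16.h>  // fp32_to_bits / fp32_from_bits, as used above

static float UlpError(float x, float y) {
  const double y_ref = std::exp(double(x));  // reference in double precision
  const float y_abs = std::abs(float(y_ref));
  const float y_ulp = fp32_from_bits(fp32_to_bits(y_abs) + 1) - y_abs;  // 1 ULP at |exp(x)|
  return float(std::abs(y_ref - double(y)) / y_ulp);
}

int main() {
  // The correctly rounded float value of exp(1) is 2.7182817f, well under 0.5 ULP.
  std::printf("ULP error at x=1: %.3f\n", UlpError(1.0f, std::exp(1.0f)));
  return 0;
}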
diff --git a/eval/f32-exp.cc b/eval/f32-exp.cc
index 8b12fb1..0aa6b1d 100644
--- a/eval/f32-exp.cc
+++ b/eval/f32-exp.cc
@@ -1,139 +1,1499 @@
-// Copyright 2019 Google LLC
+// Copyright 2020 Google LLC
 //
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
 #include <algorithm>
-#include <cfloat>
 #include <cmath>
-#include <functional>
-#include <random>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <iomanip>
+#include <ios>
 #include <vector>
 
-#include <benchmark/benchmark.h>
-#include <fp16/fp16.h>
+#include <gtest/gtest.h>
+
+#include <fp16.h>
 
 #include <xnnpack/AlignedAllocator.h>
 #include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
 #include <xnnpack/math-stubs.h>
 
 
-static void ExpError(benchmark::State& state,
-  xnn_f32_unary_math_function exp,
-  size_t tile_size)
-{
-  // The smallest x for which expf(x) is non-zero (-0x1.9FE368p+6f).
-  const uint32_t min_input = 0xC2CFF1B4;
-  // The largest x for which expf(x) is finite (0x1.62E42Ep6f).
-  const uint32_t max_input = 0x42B17217;
-  // Number of tiles in one block of inputs/outputs. Combining multiple tiles in a block reduce function call overhead.
-  const size_t num_tiles = 100;
+constexpr int kBlockSize = 1024;
 
-  double max_ulp_error = 0.0;
-  std::vector<float, AlignedAllocator<float, 64>> x(tile_size * num_tiles);
-  std::vector<float, AlignedAllocator<float, 64>> y(tile_size * num_tiles);
-  for (auto _ : state) {
-    for (uint32_t n = min_input; int32_t(n) < 0; n -= tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
-      }
-      std::fill(y.begin(), y.end(), std::nanf(""));
-
-      exp(tile_size * num_tiles * sizeof(float), x.data(), y.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double y_ref = std::exp(double(x[i]));
-        const double abs_error = std::abs(y_ref - double(y[i]));
-        const float y_abs = std::abs(y_ref);
-        const float y_ulp = fp32_from_bits(fp32_to_bits(y_abs) + 1) - y_abs;
-        max_ulp_error = std::max<double>(max_ulp_error, abs_error / y_ulp);
-      }
-    }
-    for (uint32_t n = 0; n < max_input; n += tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::min<uint32_t>(n + i, max_input));
-      }
-      std::fill(y.begin(), y.end(), std::nanf(""));
-
-      exp(tile_size * num_tiles * sizeof(float), x.data(), y.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double y_ref = std::exp(double(x[i]));
-        const double abs_error = std::abs(y_ref - double(y[i]));
-        const float y_abs = std::abs(y_ref);
-        const float y_ulp = fp32_from_bits(fp32_to_bits(y_abs) + 1) - y_abs;
-        max_ulp_error = std::max<double>(max_ulp_error, abs_error / y_ulp);
-      }
-    }
-  }
-
-  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
-}
-
-#if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  static void f32_exp__sse2_rr2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__sse2_rr2_p5, 4);
-  }
-  static void f32_exp__sse2_rr2_lut64_p2(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__sse2_rr2_lut64_p2, 4);
-  }
-  static void f32_exp__avx_rr2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx_rr2_p5, 8);
-  }
-  static void f32_exp__avx2_rr2_lut8_p3_perm(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx2_rr2_lut8_p3_perm, 8);
-  }
-  static void f32_exp__avx2_rr2_lut8_p4_perm(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx2_rr2_lut8_p4_perm, 8);
-  }
-  static void f32_exp__avx2_rr2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx2_rr2_p5, 8);
-  }
-  static void f32_exp__avx512f_rr2_lut16_p3_perm(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm, 16);
-  }
-  static void f32_exp__avx512f_rr2_lut16_p3_perm_scalef(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef, 16);
-  }
-  static void f32_exp__avx512f_rr2_lut32_p2_perm2(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2, 16);
-  }
-  static void f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef, 16);
-  }
-  static void f32_exp__avx512f_rr2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx512f_rr2_p5, 16);
-  }
-  static void f32_exp__avx512f_rr2_p5_scalef(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__avx512f_rr2_p5_scalef, 16);
-  }
-
-  BENCHMARK(f32_exp__sse2_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__sse2_rr2_lut64_p2)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx2_rr2_lut8_p4_perm)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx2_rr2_lut8_p3_perm)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx2_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx512f_rr2_lut16_p3_perm)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx512f_rr2_lut16_p3_perm_scalef)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx512f_rr2_lut32_p2_perm2)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx512f_rr2_lut32_p2_perm2_scalef)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx512f_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__avx512f_rr2_p5_scalef)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  static void f32_exp__neonfma_rr2_lut64_p2(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__neonfma_rr2_lut64_p2, 4);
-  }
-  static void f32_exp__neonfma_rr2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_exp__neonfma_rr2_p5, 4);
+  TEST(EXP__NEONFMA_RR2_LUT64_P2, negative_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
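+    // exp(-0.0f) must evaluate to exactly 1.0f.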
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
   }
 
-  BENCHMARK(f32_exp__neonfma_rr2_lut64_p2)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_exp__neonfma_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
+  TEST(EXP__NEONFMA_RR2_LUT64_P2, positive_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__NEONFMA_RR2_LUT64_P2, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
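+    // Inputs from just below the smallest x with a non-zero expf(x) down to -infinity: the kernel must return +0.0f.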
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__NEONFMA_RR2_LUT64_P2, positive_overflow) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
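+    // Inputs from just above the largest x with a finite expf(x) up to +infinity: the kernel must return +infinity.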
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__NEONFMA_RR2_LUT64_P2, positive_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
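+    // All positive NaN inputs must produce a NaN output.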
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__NEONFMA_RR2_LUT64_P2, negative_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
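+    // The same NaN payloads with the sign bit set must also produce a NaN output.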
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
-#ifndef XNNPACK_BENCHMARK_NO_MAIN
-BENCHMARK_MAIN();
-#endif
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(EXP__NEONFMA_RR2_P5, negative_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__NEONFMA_RR2_P5, positive_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__NEONFMA_RR2_P5, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__NEONFMA_RR2_P5, positive_overflow) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__NEONFMA_RR2_P5, positive_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__NEONFMA_RR2_P5, negative_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM, negative_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM, positive_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM, negative_saturation) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM, positive_overflow) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM, positive_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM, negative_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM_SCALEF, negative_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM_SCALEF, positive_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM_SCALEF, negative_saturation) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM_SCALEF, positive_overflow) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM_SCALEF, positive_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT16_P3_PERM_SCALEF, negative_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2, negative_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2, positive_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2, negative_saturation) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2, positive_overflow) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2, positive_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2, negative_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2_SCALEF, negative_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2_SCALEF, positive_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2_SCALEF, negative_saturation) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2_SCALEF, positive_overflow) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2_SCALEF, positive_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_LUT32_P2_PERM2_SCALEF, negative_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX512F_RR2_P5, negative_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_P5, positive_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_P5, negative_saturation) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_P5, positive_overflow) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_P5, positive_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_P5, negative_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX512F_RR2_P5_SCALEF, negative_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_P5_SCALEF, positive_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX512F_RR2_P5_SCALEF, negative_saturation) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_P5_SCALEF, positive_overflow) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_P5_SCALEF, positive_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX512F_RR2_P5_SCALEF, negative_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX2_RR2_LUT8_P3_PERM, negative_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P3_PERM, positive_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P3_PERM, negative_saturation) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P3_PERM, positive_overflow) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P3_PERM, positive_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P3_PERM, negative_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX2_RR2_LUT8_P4_PERM, negative_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P4_PERM, positive_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P4_PERM, negative_saturation) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P4_PERM, positive_overflow) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P4_PERM, positive_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_LUT8_P4_PERM, negative_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX2_RR2_P5, negative_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX2_RR2_P5, positive_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX2_RR2_P5, negative_saturation) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_P5, positive_overflow) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_P5, positive_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX2_RR2_P5, negative_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__AVX_RR2_P5, negative_zero) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX_RR2_P5, positive_zero) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__AVX_RR2_P5, negative_saturation) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX_RR2_P5, positive_overflow) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX_RR2_P5, positive_nan) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__AVX_RR2_P5, negative_nan) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__SSE2_RR2_LUT64_P2, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__SSE2_RR2_LUT64_P2, positive_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__SSE2_RR2_LUT64_P2, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__SSE2_RR2_LUT64_P2, positive_overflow) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__SSE2_RR2_LUT64_P2, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__SSE2_RR2_LUT64_P2, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXP__SSE2_RR2_P5, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__SSE2_RR2_P5, positive_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXP__SSE2_RR2_P5, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2CFF1B5); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__SSE2_RR2_P5, positive_overflow) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x42B17218); n <= UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0x7F800000)));
+      }
+      xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x7F800000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__SSE2_RR2_P5, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXP__SSE2_RR2_P5, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
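
A standalone sketch (outside the patch) of the bit-pattern sweep the EXP__* special-value tests above rely on: inputs are generated by reinterpreting consecutive uint32 values as floats, and the two sweep start constants mark where the tests expect exp(x) to overflow to +inf and to flush to +0.0f. The fp32 helpers below are local stand-ins for the ones the tests pull in via <fp16.h>.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static float fp32_from_bits(uint32_t bits) {
  float value;
  std::memcpy(&value, &bits, sizeof(value));  // bit-exact uint32 -> float
  return value;
}

static uint32_t fp32_to_bits(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // bit-exact float -> uint32
  return bits;
}

int main() {
  // 0x42B17218 is the start of the positive_overflow sweep: from here up the
  // tests expect exp(x) to round to +inf (bits 0x7F800000).
  const float overflow_start = fp32_from_bits(UINT32_C(0x42B17218));
  // 0xC2CFF1B5 is the start of the negative_saturation sweep: from here down
  // the tests expect exp(x) to round to +0.0f (bits 0x00000000).
  const float saturation_start = fp32_from_bits(UINT32_C(0xC2CFF1B5));
  std::printf("exp(%a) = %a (bits 0x%08X)\n",
              overflow_start, std::exp(overflow_start),
              fp32_to_bits(std::exp(overflow_start)));
  std::printf("exp(%a) = %a (bits 0x%08X)\n",
              saturation_start, std::exp(saturation_start),
              fp32_to_bits(std::exp(saturation_start)));
  return 0;
}

The same bit-walking trick drives the NaN sweeps: iterating n over 0x7F800001..0x7FFFFFFF (optionally OR-ing in the sign bit) enumerates every single-precision NaN payload.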
diff --git a/eval/f32-expm1minus-ulp.cc b/eval/f32-expm1minus-ulp.cc
new file mode 100644
index 0000000..8c84437
--- /dev/null
+++ b/eval/f32-expm1minus-ulp.cc
@@ -0,0 +1,246 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <cfloat>
+#include <cmath>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <vector>
+
+#include <cpuinfo.h>
+#include <pthreadpool.h>
+
+#include <benchmark/benchmark.h>
+#include <fp16/fp16.h>
+
+#include "bench/utils.h"
+#include <xnnpack/AlignedAllocator.h>
+#include <xnnpack/common.h>
+#include <xnnpack/math-stubs.h>
+
+
+struct ComputeErrorContext {
+  const float* input;
+  const float* output;
+  float* error;
+};
+
+static void ComputeError(
+  struct ComputeErrorContext* context,
+  size_t start,
+  size_t range)
+{
+  const float* input = context->input;
+  const float* output = context->output;
+  float* error = context->error;
+  for (size_t i = start; i < start + range; i++) {
+    const double output_ref = std::expm1(double(input[i]));
+    const double abs_error = std::abs(output_ref - double(output[i]));
+    const float output_abs = std::abs(output_ref);
+    const float output_ulp = fp32_from_bits(fp32_to_bits(output_abs) + 1) - output_abs;
+    error[i] = float(abs_error / output_ulp);
+  }
+}
+
+static void ExpM1Error(benchmark::State& state,
+  xnn_f32_unary_math_function expm1,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
+{
+  if (!cpuinfo_initialize()) {
+    state.SkipWithError("failed cpuinfo init");
+    return;
+  }
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  // The smallest x for which expm1f(x) is not saturated at -1 (-0x1.154244p+4f).
+  const uint32_t min_input = UINT32_C(0xC18AA122);
+  // Number of elements in one block of inputs/outputs.
+  // Combining multiple elements in a block reduces function call overhead.
+  const size_t block_size = 16384;
+  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
+  const size_t tile_size = 64;
+
+  uint32_t num_threads = cpuinfo_get_cores_count();
+  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+    // Use all cores except for the least performant cluster
+    if (cpuinfo_get_clusters_count() > 1) {
+      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
+    }
+  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
+    pthreadpool_create(num_threads), pthreadpool_destroy);
+
+  std::vector<float, AlignedAllocator<float, 64>> x(block_size);
+  std::vector<float, AlignedAllocator<float, 64>> y(block_size);
+  std::vector<float> ulp_error(block_size);
+  float max_ulp_error = 0.0f;
+
+  ComputeErrorContext context;
+  context.input = x.data();
+  context.output = y.data();
+  context.error = ulp_error.data();
+  for (auto _ : state) {
+    for (uint32_t n = min_input; int32_t(n) < 0; n -= block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
+      }
+      std::fill(y.begin(), y.end(), std::nanf(""));
+
+      expm1(block_size * sizeof(float), x.data(), y.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+  }
+
+  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
+}
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  BENCHMARK_CAPTURE(ExpM1Error, neon_rr2_lut16_p3,
+                    xnn_math_f32_expm1minus__neon_rr2_lut16_p3,
+                    benchmark::utils::CheckNEON)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, neon_rr2_p6,
+                    xnn_math_f32_expm1minus__neon_rr2_p6,
+                    benchmark::utils::CheckNEON)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExpM1Error, neonfma_rr1_lut16_p3,
+                    xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, neonfma_rr1_p6,
+                    xnn_math_f32_expm1minus__neonfma_rr1_p6,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  BENCHMARK_CAPTURE(ExpM1Error, avx512f_rr1_lut16_p3_perm,
+                    xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, avx512f_rr1_p6,
+                    xnn_math_f32_expm1minus__avx512f_rr1_p6,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExpM1Error, avx2_rr1_lut4_p4_perm,
+                    xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, avx2_rr1_lut8_p4_perm,
+                    xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, avx2_rr1_lut16_p3_gather,
+                    xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, avx2_rr1_p6,
+                    xnn_math_f32_expm1minus__avx2_rr1_p6,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExpM1Error, avx_rr2_lut4_p4_perm,
+                    xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm,
+                    benchmark::utils::CheckAVX)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, avx_rr2_lut16_p3,
+                    xnn_math_f32_expm1minus__avx_rr2_lut16_p3,
+                    benchmark::utils::CheckAVX)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, avx_rr2_p6,
+                    xnn_math_f32_expm1minus__avx_rr2_p6,
+                    benchmark::utils::CheckAVX)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExpM1Error, sse2_rr2_lut16_p3,
+                    xnn_math_f32_expm1minus__sse2_rr2_lut16_p3)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, sse2_rr2_p6,
+                    xnn_math_f32_expm1minus__sse2_rr2_p6)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#if XNN_ARCH_WASMSIMD
+  BENCHMARK_CAPTURE(ExpM1Error, wasmsimd_rr2_lut16_p3_andnot,
+                    xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, wasmsimd_rr2_lut16_p3_max,
+                    xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, wasmsimd_rr2_p6_andnot,
+                    xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpM1Error, wasmsimd_rr2_p6_max,
+                    xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_WASMSIMD
+
+BENCHMARK_CAPTURE(ExpM1Error, scalar_rr2_lut4_p4,
+                  xnn_math_f32_expm1minus__scalar_rr2_lut4_p4)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(ExpM1Error, scalar_rr2_lut8_p3,
+                  xnn_math_f32_expm1minus__scalar_rr2_lut8_p3)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(ExpM1Error, scalar_rr2_lut8_p4,
+                  xnn_math_f32_expm1minus__scalar_rr2_lut8_p4)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(ExpM1Error, scalar_rr2_lut16_p3,
+                  xnn_math_f32_expm1minus__scalar_rr2_lut16_p3)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(ExpM1Error, scalar_rr2_lut16_p4,
+                  xnn_math_f32_expm1minus__scalar_rr2_lut16_p4)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(ExpM1Error, scalar_rr2_p5,
+                  xnn_math_f32_expm1minus__scalar_rr2_p5)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(ExpM1Error, scalar_rr2_p6,
+                  xnn_math_f32_expm1minus__scalar_rr2_p6)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+
+#ifndef XNNPACK_BENCHMARK_NO_MAIN
+BENCHMARK_MAIN();
+#endif
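
A standalone sketch (outside the patch) of the ULP-error metric that ComputeError() in the new benchmark applies per element: the absolute error against a double-precision expm1 reference, divided by the size of one float ULP at the reference magnitude. The fp32 helpers again stand in for the <fp16.h> utilities; the sample inputs in main() are illustrative only.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static float fp32_from_bits(uint32_t bits) {
  float value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}

static uint32_t fp32_to_bits(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

// Error of `approx` relative to expm1(input), measured in float ULPs.
static float Expm1UlpError(float input, float approx) {
  const double reference = std::expm1(double(input));
  const double abs_error = std::abs(reference - double(approx));
  const float reference_abs = std::fabs(static_cast<float>(reference));
  // Distance from |reference| to the next representable float: one ULP.
  const float ulp = fp32_from_bits(fp32_to_bits(reference_abs) + 1) - reference_abs;
  return static_cast<float>(abs_error / ulp);
}

int main() {
  const float x = -0x1.0p-12f;
  // expm1 evaluated directly stays within a fraction of a ULP.
  std::printf("ulp error of expm1(x)  = %g\n", Expm1UlpError(x, std::expm1(x)));
  // The naive exp(x) - 1 cancels catastrophically near zero, which this
  // metric makes visible as an error of many ULPs.
  std::printf("ulp error of exp(x)-1  = %g\n", Expm1UlpError(x, std::exp(x) - 1.0f));
  return 0;
}

The benchmark reports the maximum of this per-element error over the whole negative input range as the ULPERROR counter.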
diff --git a/eval/f32-expm1minus.cc b/eval/f32-expm1minus.cc
index d22537f..b2746c5 100644
--- a/eval/f32-expm1minus.cc
+++ b/eval/f32-expm1minus.cc
@@ -4,192 +4,1802 @@
 // LICENSE file in the root directory of this source tree.
 
 #include <algorithm>
-#include <cfloat>
 #include <cmath>
-#include <functional>
-#include <random>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <iomanip>
+#include <ios>
 #include <vector>
 
-#include <benchmark/benchmark.h>
-#include <fp16/fp16.h>
+#include <gtest/gtest.h>
 
-#include "bench/utils.h"
+#include <fp16.h>
+
 #include <xnnpack/AlignedAllocator.h>
 #include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
 #include <xnnpack/math-stubs.h>
 
 
-static void Expm1Error(benchmark::State& state,
-  xnn_f32_unary_math_function expm1,
-  size_t tile_size,
-  benchmark::utils::IsaCheckFunction isa_check = nullptr)
-{
-  if (isa_check && !isa_check(state)) {
-    return;
+constexpr int kBlockSize = 1024;
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(EXPM1MINUS__NEON_RR2_LUT16_P3, negative_zero) {
+    TEST_REQUIRES_ARM_NEON;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
   }
 
-  // The smallest x for which expm1f(x) is not saturated at -1 (-0x1.154244p+4f).
-  const uint32_t min_input = 0xC18AA122;
-  // Number of tiles in one block of inputs/outputs. Combining multiple tiles in a block reduce function call overhead.
-  const size_t num_tiles = 100;
+  TEST(EXPM1MINUS__NEON_RR2_LUT16_P3, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON;
 
-  double max_ulp_error = 0.0;
-  std::vector<float, AlignedAllocator<float, 64>> x(tile_size * num_tiles);
-  std::vector<float, AlignedAllocator<float, 64>> y(tile_size * num_tiles);
-  for (auto _ : state) {
-    for (uint32_t n = min_input; int32_t(n) < 0; n -= tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
       }
-      std::fill(y.begin(), y.end(), std::nanf(""));
-
-      expm1(tile_size * num_tiles * sizeof(float), x.data(), y.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double y_ref = std::expm1(double(x[i]));
-        const double abs_error = std::abs(y_ref - double(y[i]));
-        const float y_abs = std::abs(y_ref);
-        const float y_ulp = fp32_from_bits(fp32_to_bits(y_abs) + 1) - y_abs;
-        max_ulp_error = std::max<double>(max_ulp_error, abs_error / y_ulp);
+      xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
       }
     }
   }
 
-  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
-}
+  TEST(EXPM1MINUS__NEON_RR2_LUT16_P3, positive_nan) {
+    TEST_REQUIRES_ARM_NEON;
 
-#if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  static void f32_expm1minus__neon_rr2_lut16_p3(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__neon_rr2_lut16_p3, 4, benchmark::utils::CheckNEON);
-  }
-  static void f32_expm1minus__neon_rr2_p6(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__neon_rr2_p6, 4, benchmark::utils::CheckNEON);
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
   }
 
-  static void f32_expm1minus__neonfma_rr1_lut16_p3(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3, 4, benchmark::utils::CheckNEONFMA);
-  }
-  static void f32_expm1minus__neonfma_rr1_p6(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__neonfma_rr1_p6, 4, benchmark::utils::CheckNEONFMA);
-  }
+  TEST(EXPM1MINUS__NEON_RR2_LUT16_P3, negative_nan) {
+    TEST_REQUIRES_ARM_NEON;
 
-  BENCHMARK(f32_expm1minus__neon_rr2_lut16_p3)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__neon_rr2_p6)->Unit(benchmark::kMillisecond)->Iterations(1);
-
-  BENCHMARK(f32_expm1minus__neonfma_rr1_lut16_p3)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__neonfma_rr1_p6)->Unit(benchmark::kMillisecond)->Iterations(1);
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
-#if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  static void f32_expm1minus__avx512f_rr1_lut16_p3_perm(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm, 16, benchmark::utils::CheckAVX512F);
-  }
-  static void f32_expm1minus__avx512f_rr1_p6(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx512f_rr1_p6, 16, benchmark::utils::CheckAVX512F);
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(EXPM1MINUS__NEON_RR2_P6, negative_zero) {
+    TEST_REQUIRES_ARM_NEON;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__neon_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
   }
 
-  BENCHMARK(f32_expm1minus__avx512f_rr1_lut16_p3_perm)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__avx512f_rr1_p6)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__NEON_RR2_P6, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__neon_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__NEON_RR2_P6, positive_nan) {
+    TEST_REQUIRES_ARM_NEON;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__neon_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__NEON_RR2_P6, negative_nan) {
+    TEST_REQUIRES_ARM_NEON;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__neon_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(EXPM1MINUS__NEONFMA_RR1_LUT16_P3, negative_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__NEONFMA_RR1_LUT16_P3, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__NEONFMA_RR1_LUT16_P3, positive_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__NEONFMA_RR1_LUT16_P3, negative_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(EXPM1MINUS__NEONFMA_RR1_P6, negative_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__neonfma_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__NEONFMA_RR1_P6, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__neonfma_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__NEONFMA_RR1_P6, positive_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__neonfma_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__NEONFMA_RR1_P6, negative_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__neonfma_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  static void f32_expm1minus__avx2_rr1_lut4_p4_perm(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm, 8, benchmark::utils::CheckAVX2);
-  }
-  static void f32_expm1minus__avx2_rr1_lut8_p4_perm(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm, 8, benchmark::utils::CheckAVX2);
-  }
-  static void f32_expm1minus__avx2_rr1_lut16_p3_gather(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather, 8, benchmark::utils::CheckAVX2);
-  }
-  static void f32_expm1minus__avx2_rr1_p6(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx2_rr1_p6, 8, benchmark::utils::CheckAVX2);
+  TEST(EXPM1MINUS__AVX512F_RR1_LUT16_P3_PERM, negative_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
   }
 
-  BENCHMARK(f32_expm1minus__avx2_rr1_lut4_p4_perm)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__avx2_rr1_lut8_p4_perm)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__avx2_rr1_lut16_p3_gather)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__avx2_rr1_p6)->Unit(benchmark::kMillisecond)->Iterations(1);
+  TEST(EXPM1MINUS__AVX512F_RR1_LUT16_P3_PERM, negative_saturation) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX512F_RR1_LUT16_P3_PERM, positive_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX512F_RR1_LUT16_P3_PERM, negative_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
+
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  static void f32_expm1minus__avx_rr2_lut4_p4_perm(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm, 8, benchmark::utils::CheckAVX);
-  }
-  static void f32_expm1minus__avx_rr2_lut16_p3(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx_rr2_lut16_p3, 8, benchmark::utils::CheckAVX);
-  }
-  static void f32_expm1minus__avx_rr2_p6(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__avx_rr2_p6, 8, benchmark::utils::CheckAVX);
+  TEST(EXPM1MINUS__AVX512F_RR1_P6, negative_zero) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx512f_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
   }
 
-  BENCHMARK(f32_expm1minus__avx_rr2_lut4_p4_perm)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__avx_rr2_lut16_p3)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__avx_rr2_p6)->Unit(benchmark::kMillisecond)->Iterations(1);
+  TEST(EXPM1MINUS__AVX512F_RR1_P6, negative_saturation) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx512f_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX512F_RR1_P6, positive_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx512f_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX512F_RR1_P6, negative_nan) {
+    TEST_REQUIRES_X86_AVX512F;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx512f_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
+
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  static void f32_expm1minus__sse2_rr2_lut16_p3(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__sse2_rr2_lut16_p3, 4);
-  }
-  static void f32_expm1minus__sse2_rr2_p6(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__sse2_rr2_p6, 4);
+  TEST(EXPM1MINUS__AVX2_RR1_LUT4_P4_PERM, negative_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
   }
 
-  BENCHMARK(f32_expm1minus__sse2_rr2_lut16_p3)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__sse2_rr2_p6)->Unit(benchmark::kMillisecond)->Iterations(1);
+  TEST(EXPM1MINUS__AVX2_RR1_LUT4_P4_PERM, negative_saturation) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_LUT4_P4_PERM, positive_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_LUT4_P4_PERM, negative_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__AVX2_RR1_LUT8_P4_PERM, negative_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_LUT8_P4_PERM, negative_saturation) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_LUT8_P4_PERM, positive_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_LUT8_P4_PERM, negative_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__AVX2_RR1_LUT16_P3_GATHER, negative_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_LUT16_P3_GATHER, negative_saturation) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_LUT16_P3_GATHER, positive_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_LUT16_P3_GATHER, negative_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__AVX2_RR1_P6, negative_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx2_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_P6, negative_saturation) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_P6, positive_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX2_RR1_P6, negative_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx2_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__AVX_RR2_LUT4_P4_PERM, negative_zero) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_LUT4_P4_PERM, negative_saturation) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_LUT4_P4_PERM, positive_nan) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_LUT4_P4_PERM, negative_nan) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__AVX_RR2_LUT16_P3, negative_zero) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_LUT16_P3, negative_saturation) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_LUT16_P3, positive_nan) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_LUT16_P3, negative_nan) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__AVX_RR2_P6, negative_zero) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__avx_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_P6, negative_saturation) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_P6, positive_nan) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__AVX_RR2_P6, negative_nan) {
+    TEST_REQUIRES_X86_AVX;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__avx_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__SSE2_RR2_LUT16_P3, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__SSE2_RR2_LUT16_P3, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__SSE2_RR2_LUT16_P3, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__SSE2_RR2_LUT16_P3, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPM1MINUS__SSE2_RR2_P6, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__sse2_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__SSE2_RR2_P6, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__sse2_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__SSE2_RR2_P6, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__sse2_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__SSE2_RR2_P6, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__sse2_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
 #if XNN_ARCH_WASMSIMD
-  static void f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot, 4);
-  }
-  static void f32_expm1minus__wasmsimd_rr2_lut16_p3_max(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max, 4);
-  }
-  static void f32_expm1minus__wasmsimd_rr2_p6_andnot(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot, 4);
-  }
-  static void f32_expm1minus__wasmsimd_rr2_p6_max(benchmark::State& state) {
-    Expm1Error(state, xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max, 4);
+  TEST(EXPM1MINUS__WASMSIMD_RR2_LUT16_P3_ANDNOT, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
   }
 
-  BENCHMARK(f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__wasmsimd_rr2_lut16_p3_max)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__wasmsimd_rr2_p6_andnot)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expm1minus__wasmsimd_rr2_p6_max)->Unit(benchmark::kMillisecond)->Iterations(1);
+  TEST(EXPM1MINUS__WASMSIMD_RR2_LUT16_P3_ANDNOT, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_LUT16_P3_ANDNOT, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_LUT16_P3_ANDNOT, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
 #endif  // XNN_ARCH_WASMSIMD
 
-static void f32_expm1minus__scalar_rr2_lut4_p4(benchmark::State& state) {
-  Expm1Error(state, xnn_math_f32_expm1minus__scalar_rr2_lut4_p4, 1);
-}
-static void f32_expm1minus__scalar_rr2_lut8_p3(benchmark::State& state) {
-  Expm1Error(state, xnn_math_f32_expm1minus__scalar_rr2_lut8_p3, 1);
-}
-static void f32_expm1minus__scalar_rr2_lut8_p4(benchmark::State& state) {
-  Expm1Error(state, xnn_math_f32_expm1minus__scalar_rr2_lut8_p4, 1);
-}
-static void f32_expm1minus__scalar_rr2_lut16_p3(benchmark::State& state) {
-  Expm1Error(state, xnn_math_f32_expm1minus__scalar_rr2_lut16_p3, 1);
-}
-static void f32_expm1minus__scalar_rr2_lut16_p4(benchmark::State& state) {
-  Expm1Error(state, xnn_math_f32_expm1minus__scalar_rr2_lut16_p4, 1);
-}
-static void f32_expm1minus__scalar_rr2_p5(benchmark::State& state) {
-  Expm1Error(state, xnn_math_f32_expm1minus__scalar_rr2_p5, 1);
-}
-static void f32_expm1minus__scalar_rr2_p6(benchmark::State& state) {
-  Expm1Error(state, xnn_math_f32_expm1minus__scalar_rr2_p6, 1);
+
+#if XNN_ARCH_WASMSIMD
+  TEST(EXPM1MINUS__WASMSIMD_RR2_LUT16_P3_MAX, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_LUT16_P3_MAX, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_LUT16_P3_MAX, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_LUT16_P3_MAX, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(EXPM1MINUS__WASMSIMD_RR2_P6_ANDNOT, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_P6_ANDNOT, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_P6_ANDNOT, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_P6_ANDNOT, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(EXPM1MINUS__WASMSIMD_RR2_P6_MAX, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 0.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_P6_MAX, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const float reference_output = -1.0f;
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_P6_MAX, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPM1MINUS__WASMSIMD_RR2_P6_MAX, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT4_P4, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 0.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
 }
 
-BENCHMARK(f32_expm1minus__scalar_rr2_lut4_p4)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_expm1minus__scalar_rr2_lut8_p3)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_expm1minus__scalar_rr2_lut8_p4)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_expm1minus__scalar_rr2_lut16_p3)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_expm1minus__scalar_rr2_lut16_p4)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_expm1minus__scalar_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_expm1minus__scalar_rr2_p6)->Unit(benchmark::kMillisecond)->Iterations(1);
+TEST(EXPM1MINUS__SCALAR_RR2_LUT4_P4, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const float reference_output = -1.0f;
+      ASSERT_EQ(reference_output, outputs[i])
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
 
-#ifndef XNNPACK_BENCHMARK_NO_MAIN
-BENCHMARK_MAIN();
-#endif
+TEST(EXPM1MINUS__SCALAR_RR2_LUT4_P4, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT4_P4, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT8_P3, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 0.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT8_P3, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const float reference_output = -1.0f;
+      ASSERT_EQ(reference_output, outputs[i])
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT8_P3, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT8_P3, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT8_P4, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 0.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT8_P4, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const float reference_output = -1.0f;
+      ASSERT_EQ(reference_output, outputs[i])
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT8_P4, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT8_P4, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT16_P3, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 0.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT16_P3, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const float reference_output = -1.0f;
+      ASSERT_EQ(reference_output, outputs[i])
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT16_P3, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT16_P3, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT16_P4, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 0.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT16_P4, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const float reference_output = -1.0f;
+      ASSERT_EQ(reference_output, outputs[i])
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT16_P4, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_LUT16_P4, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+
+TEST(EXPM1MINUS__SCALAR_RR2_P5, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expm1minus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 0.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_P5, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const float reference_output = -1.0f;
+      ASSERT_EQ(reference_output, outputs[i])
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_P5, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_P5, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+
+TEST(EXPM1MINUS__SCALAR_RR2_P6, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expm1minus__scalar_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 0.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_P6, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC18AA123); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const float reference_output = -1.0f;
+      ASSERT_EQ(reference_output, outputs[i])
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_P6, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPM1MINUS__SCALAR_RR2_P6, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expm1minus__scalar_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
diff --git a/eval/f32-expminus-ulp.cc b/eval/f32-expminus-ulp.cc
new file mode 100644
index 0000000..63dbd2d
--- /dev/null
+++ b/eval/f32-expminus-ulp.cc
@@ -0,0 +1,160 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <cfloat>
+#include <cmath>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <vector>
+
+#include <cpuinfo.h>
+#include <pthreadpool.h>
+
+#include <benchmark/benchmark.h>
+#include <fp16/fp16.h>
+
+#include "bench/utils.h"
+#include <xnnpack/AlignedAllocator.h>
+#include <xnnpack/common.h>
+#include <xnnpack/math-stubs.h>
+
+
+struct ComputeErrorContext {
+  const float* input;
+  const float* output;
+  float* error;
+};
+
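+// Computes the per-element error of the optimized outputs against a
+// double-precision std::exp() reference, expressed in ULPs of the reference result.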
+static void ComputeError(
+  struct ComputeErrorContext* context,
+  size_t start,
+  size_t range)
+{
+  const float* input = context->input;
+  const float* output = context->output;
+  float* error = context->error;
+  for (size_t i = start; i < start + range; i++) {
+    const double output_ref = std::exp(double(input[i]));
+    const double abs_error = std::abs(output_ref - double(output[i]));
+    const float output_abs = std::abs(output_ref);
+    const float output_ulp = fp32_from_bits(fp32_to_bits(output_abs) + 1) - output_abs;
+    error[i] = float(abs_error / output_ulp);
+  }
+}
+
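+// Benchmark that reports the worst-case ULP error of an e^x evaluation
+// micro-kernel over the negative input range where the result is normalized.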
+static void ExpError(benchmark::State& state,
+  xnn_f32_unary_math_function exp,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
+{
+  if (!cpuinfo_initialize()) {
+    state.SkipWithError("failed cpuinfo init");
+    return;
+  }
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  // The smallest x for which expf(x) is normalized (-0x1.5D589Ep6f).
+  const uint32_t min_input = UINT32_C(0xC2AEAC4F);
+  // Number of elements in one block of inputs/outputs.
+  // Combining multiple elements in a block reduces function call overhead.
+  const size_t block_size = 16384;
+  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
+  const size_t tile_size = 64;
+
+  uint32_t num_threads = cpuinfo_get_cores_count();
+  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+    // Use all cores except for the least performant cluster
+    if (cpuinfo_get_clusters_count() > 1) {
+      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
+    }
+  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
+    pthreadpool_create(num_threads), pthreadpool_destroy);
+
+  std::vector<float, AlignedAllocator<float, 64>> x(block_size);
+  std::vector<float, AlignedAllocator<float, 64>> y(block_size);
+  std::vector<float> ulp_error(block_size);
+  float max_ulp_error = 0.0f;
+
+  ComputeErrorContext context;
+  context.input = x.data();
+  context.output = y.data();
+  context.error = ulp_error.data();
+  for (auto _ : state) {
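+    // Sweep negative inputs from min_input up towards -0.0f, stepping the
+    // bit pattern in blocks of block_size elements.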
+    for (uint32_t n = min_input; int32_t(n) < 0; n -= block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
+      }
+      std::fill(y.begin(), y.end(), std::nanf(""));
+
+      exp(block_size * sizeof(float), x.data(), y.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
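+      // Track the worst-case (maximum) ULP error observed across all blocks.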
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+  }
+
+  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
+}
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  BENCHMARK_CAPTURE(ExpError, neonfma_rr2_lut64_p2,
+                    xnn_math_f32_expminus__neonfma_rr2_lut64_p2,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, neonfma_rr2_lut2048_p1,
+                    xnn_math_f32_expminus__neonfma_rr2_lut2048_p1,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(ExpError, neonfma_rr2_p5,
+                    xnn_math_f32_expminus__neonfma_rr2_p5,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  BENCHMARK_CAPTURE(ExpError, avx2_rr2_p5,
+                    xnn_math_f32_expminus__avx2_rr2_p5,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExpError, sse2_rr2_p5,
+                    xnn_math_f32_expminus__sse2_rr2_p5)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+BENCHMARK_CAPTURE(ExpError, scalar_rr2_lut64_p2,
+                  xnn_math_f32_expminus__scalar_rr2_lut64_p2)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(ExpError, scalar_rr2_lut2048_p1,
+                  xnn_math_f32_expminus__scalar_rr2_lut2048_p1)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(ExpError, scalar_rr2_p5,
+                  xnn_math_f32_expminus__scalar_rr2_p5)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+
+#ifndef XNNPACK_BENCHMARK_NO_MAIN
+BENCHMARK_MAIN();
+#endif
diff --git a/eval/f32-expminus.cc b/eval/f32-expminus.cc
index cf9c08b..2895acb 100644
--- a/eval/f32-expminus.cc
+++ b/eval/f32-expminus.cc
@@ -1,98 +1,675 @@
-// Copyright 2019 Google LLC
+// Copyright 2020 Google LLC
 //
 // This source code is licensed under the BSD-style license found in the
 // LICENSE file in the root directory of this source tree.
 
 #include <algorithm>
-#include <cfloat>
 #include <cmath>
-#include <functional>
-#include <random>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <iomanip>
+#include <ios>
 #include <vector>
 
-#include <benchmark/benchmark.h>
-#include <fp16/fp16.h>
+#include <gtest/gtest.h>
+
+#include <fp16.h>
 
 #include <xnnpack/AlignedAllocator.h>
 #include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
 #include <xnnpack/math-stubs.h>
 
 
-static void ExpError(benchmark::State& state,
-  xnn_f32_unary_math_function exp,
-  size_t tile_size)
-{
-  // The smallest x for which expf(x) is normalized (-0x1.5D589Ep6f).
-  const uint32_t min_input = 0xC2AEAC4FL;
-  const size_t num_tiles = 100;
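+// Number of elements processed per call to the math kernel under test.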
+constexpr int kBlockSize = 1024;
 
-  double max_ulp_error = 0.0;
-  std::vector<float, AlignedAllocator<float, 64>> x(tile_size * num_tiles);
-  std::vector<float, AlignedAllocator<float, 64>> y(tile_size * num_tiles);
-  for (auto _ : state) {
-    for (uint32_t n = min_input; int32_t(n) < 0; n -= tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(EXPMINUS__NEONFMA_RR2_LUT64_P2, negative_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_LUT64_P2, positive_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_LUT64_P2, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2AEAC50); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
       }
-      std::fill(y.begin(), y.end(), std::nanf(""));
-
-      exp(tile_size * num_tiles * sizeof(float), x.data(), y.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double y_ref = std::exp(double(x[i]));
-        const double abs_error = std::abs(y_ref - double(y[i]));
-        const float y_abs = std::abs(y_ref);
-        const float y_ulp = fp32_from_bits(fp32_to_bits(y_abs) + 1) - y_abs;
-        max_ulp_error = std::max<double>(max_ulp_error, abs_error / y_ulp);
+      xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
       }
     }
   }
 
-  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
-}
+  TEST(EXPMINUS__NEONFMA_RR2_LUT64_P2, positive_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
 
-#if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  static void f32_expminus__sse2_rr2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_expminus__sse2_rr2_p5, 4);
-  }
-  static void f32_expminus__avx2_rr2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_expminus__avx2_rr2_p5, 8);
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
   }
 
-  BENCHMARK(f32_expminus__sse2_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expminus__avx2_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPMINUS__NEONFMA_RR2_LUT64_P2, negative_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
 
-#if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  static void f32_expminus__neonfma_rr2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_expminus__neonfma_rr2_p5, 4);
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
   }
-  static void f32_expminus__neonfma_rr2_lut64_p2(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_expminus__neonfma_rr2_lut64_p2, 4);
-  }
-  static void f32_expminus__neonfma_rr2_lut2048_p1(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_expminus__neonfma_rr2_lut2048_p1, 4);
-  }
-
-  BENCHMARK(f32_expminus__neonfma_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expminus__neonfma_rr2_lut64_p2)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_expminus__neonfma_rr2_lut2048_p1)->Unit(benchmark::kMillisecond)->Iterations(1);
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
-static void f32_expminus__scalar_rr2_p5(benchmark::State& state) {
-  ExpError(state, xnn_math_f32_expminus__scalar_rr2_p5, 1);
-}
-static void f32_expminus__scalar_rr2_lut64_p2(benchmark::State& state) {
-  ExpError(state, xnn_math_f32_expminus__scalar_rr2_lut64_p2, 1);
-}
-static void f32_expminus__scalar_rr2_lut2048_p1(benchmark::State& state) {
-  ExpError(state, xnn_math_f32_expminus__scalar_rr2_lut2048_p1, 1);
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(EXPMINUS__NEONFMA_RR2_LUT2048_P1, negative_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expminus__neonfma_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_LUT2048_P1, positive_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_expminus__neonfma_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_LUT2048_P1, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2AEAC50); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expminus__neonfma_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_LUT2048_P1, positive_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expminus__neonfma_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_LUT2048_P1, negative_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expminus__neonfma_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(EXPMINUS__NEONFMA_RR2_P5, negative_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expminus__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_P5, positive_zero) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_expminus__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_P5, negative_saturation) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2AEAC50); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expminus__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_P5, positive_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expminus__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPMINUS__NEONFMA_RR2_P5, negative_nan) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expminus__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPMINUS__AVX2_RR2_P5, negative_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expminus__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__AVX2_RR2_P5, positive_zero) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_expminus__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__AVX2_RR2_P5, negative_saturation) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2AEAC50); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expminus__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPMINUS__AVX2_RR2_P5, positive_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expminus__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPMINUS__AVX2_RR2_P5, negative_nan) {
+    TEST_REQUIRES_X86_AVX2;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expminus__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(EXPMINUS__SSE2_RR2_P5, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_expminus__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__SSE2_RR2_P5, positive_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_expminus__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    const float reference_output = 1.0f;
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+  }
+
+  TEST(EXPMINUS__SSE2_RR2_P5, negative_saturation) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC2AEAC50); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+      }
+      xnn_math_f32_expminus__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint32_t reference_output = UINT32_C(0x00000000);
+        ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPMINUS__SSE2_RR2_P5, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+      }
+      xnn_math_f32_expminus__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+
+  TEST(EXPMINUS__SSE2_RR2_P5, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+      }
+      xnn_math_f32_expminus__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_TRUE(std::isnan(outputs[i]))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+TEST(EXPMINUS__SCALAR_RR2_LUT64_P2, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expminus__scalar_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 1.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
 }
 
-BENCHMARK(f32_expminus__scalar_rr2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_expminus__scalar_rr2_lut64_p2)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_expminus__scalar_rr2_lut2048_p1)->Unit(benchmark::kMillisecond)->Iterations(1);
+TEST(EXPMINUS__SCALAR_RR2_LUT64_P2, positive_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), +0.0f);
+  xnn_math_f32_expminus__scalar_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 1.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
 
-#ifndef XNNPACK_BENCHMARK_NO_MAIN
-BENCHMARK_MAIN();
-#endif
+TEST(EXPMINUS__SCALAR_RR2_LUT64_P2, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC2AEAC50); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expminus__scalar_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const uint32_t reference_output = UINT32_C(0x00000000);
+      ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPMINUS__SCALAR_RR2_LUT64_P2, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expminus__scalar_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPMINUS__SCALAR_RR2_LUT64_P2, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expminus__scalar_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+
+TEST(EXPMINUS__SCALAR_RR2_LUT2048_P1, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expminus__scalar_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 1.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPMINUS__SCALAR_RR2_LUT2048_P1, positive_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), +0.0f);
+  xnn_math_f32_expminus__scalar_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 1.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPMINUS__SCALAR_RR2_LUT2048_P1, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC2AEAC50); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expminus__scalar_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const uint32_t reference_output = UINT32_C(0x00000000);
+      ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPMINUS__SCALAR_RR2_LUT2048_P1, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expminus__scalar_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPMINUS__SCALAR_RR2_LUT2048_P1, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expminus__scalar_rr2_lut2048_p1(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+
+TEST(EXPMINUS__SCALAR_RR2_P5, negative_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), -0.0f);
+  xnn_math_f32_expminus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 1.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPMINUS__SCALAR_RR2_P5, positive_zero) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  std::fill(inputs.begin(), inputs.end(), +0.0f);
+  xnn_math_f32_expminus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+  const float reference_output = 1.0f;
+  ASSERT_EQ(reference_output, outputs[0])
+    << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+    << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(reference_output)
+    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[0]);
+}
+
+TEST(EXPMINUS__SCALAR_RR2_P5, negative_saturation) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0xC2AEAC50); n <= UINT32_C(0xFF800000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(n + i, UINT32_C(0xFF800000)));
+    }
+    xnn_math_f32_expminus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      const uint32_t reference_output = UINT32_C(0x00000000);
+      ASSERT_EQ(reference_output, fp32_to_bits(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPMINUS__SCALAR_RR2_P5, positive_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), n + i));
+    }
+    xnn_math_f32_expminus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
+
+TEST(EXPMINUS__SCALAR_RR2_P5, negative_nan) {
+  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
+  for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      inputs[i] = fp32_from_bits(std::min(UINT32_C(0x7FFFFFFF), UINT32_C(0x80000000) | (n + i)));
+    }
+    xnn_math_f32_expminus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
+    for (uint32_t i = 0; i < kBlockSize; i++) {
+      ASSERT_TRUE(std::isnan(outputs[i]))
+        << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(outputs[i]);
+    }
+  }
+}
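For reference, a minimal standalone sketch (not part of the patch) of the boundary the negative_saturation tests above sweep: every input from bit pattern 0xC2AEAC50 (one float ULP below -0x1.5D589Ep+6, roughly -87.34) down to negative infinity is asserted to produce the exact bit pattern 0x00000000, i.e. the kernels are expected to flush results below the normalized-float range to +0.0f rather than emit subnormals. The from_bits helper below is a stand-in for the fp32_from_bits used elsewhere in this patch.

#include <cfloat>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Stand-in for fp32_from_bits from the fp16 headers used by the tests above.
static float from_bits(uint32_t w) { float f; std::memcpy(&f, &w, sizeof f); return f; }

int main() {
  const float last_normal_in = from_bits(UINT32_C(0xC2AEAC4F));  // ~ -87.336548f
  const float first_flush_in = from_bits(UINT32_C(0xC2AEAC50));  // one ULP towards -infinity
  // Mathematically, exp(last_normal_in) is still at or above FLT_MIN; one ULP further
  // down it falls into the subnormal range, and the tests expect the kernels to return
  // exactly +0.0f from that point on.
  std::printf("exp(%.9g) = %.9g  (FLT_MIN = %.9g)\n",
              last_normal_in, std::exp(double(last_normal_in)), double(FLT_MIN));
  std::printf("exp(%.9g) = %.9g\n", first_flush_in, std::exp(double(first_flush_in)));
  return 0;
}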
diff --git a/eval/f32-extexp-ulp.cc b/eval/f32-extexp-ulp.cc
new file mode 100644
index 0000000..9b7d562
--- /dev/null
+++ b/eval/f32-extexp-ulp.cc
@@ -0,0 +1,155 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <cfloat>
+#include <cmath>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <vector>
+
+#include <cpuinfo.h>
+#include <pthreadpool.h>
+
+#include <benchmark/benchmark.h>
+#include <fp16/fp16.h>
+
+#include "bench/utils.h"
+#include <xnnpack/AlignedAllocator.h>
+#include <xnnpack/common.h>
+#include <xnnpack/math-stubs.h>
+
+
+struct ComputeErrorContext {
+  const float* input;
+  const float* output_m;
+  const float* output_e;
+  float* error;
+};
+
+static void ComputeError(
+  struct ComputeErrorContext* context,
+  size_t start,
+  size_t range)
+{
+  const float* input = context->input;
+  const float* output_m = context->output_m;
+  const float* output_e = context->output_e;
+  float* error = context->error;
+  const double inv_ulp = 0x1.0p+24;
+  for (size_t i = start; i < start + range; i++) {
+    const double output_ref = std::exp(double(input[i]));
+    int output_ref_e;
+    const double output_ref_m = std::frexp(output_ref, &output_ref_e);
+    const double ulp_error = std::abs(output_ref_m - std::ldexp(double(output_m[i]), int(output_e[i]) - output_ref_e)) * inv_ulp;
+    error[i] = float(ulp_error);
+  }
+}
+
+static void ExtExpError(benchmark::State& state,
+  xnn_f32_ext_unary_math_function extexp,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
+{
+  if (!cpuinfo_initialize()) {
+    state.SkipWithError("failed cpuinfo init");
+    return;
+  }
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  // The smallest x for which exp(x) (double-precision) is normal (-0x1.6232BCp9f).
+  const uint32_t min_input = 0xC431195E;
+  // The largest x for which exp(x) (double-precision) is finite (0x1.62E42Ep9).
+  const uint32_t max_input = 0x44317217;
+  // Number of elements in one block of inputs/outputs.
+  // Combining multiple elements in a block reduces function call overhead.
+  const size_t block_size = 16384;
+  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
+  const size_t tile_size = 64;
+
+  uint32_t num_threads = cpuinfo_get_cores_count();
+  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+    // Use all cores except for the least performant cluster
+    if (cpuinfo_get_clusters_count() > 1) {
+      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
+    }
+  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
+    pthreadpool_create(num_threads), pthreadpool_destroy);
+
+  std::vector<float, AlignedAllocator<float, 64>> x(block_size);
+  std::vector<float, AlignedAllocator<float, 64>> m(block_size);
+  std::vector<float, AlignedAllocator<float, 64>> e(block_size);
+  std::vector<float> ulp_error(block_size);
+  float max_ulp_error = 0.0f;
+
+  ComputeErrorContext context;
+  context.input = x.data();
+  context.output_m = m.data();
+  context.output_e = e.data();
+  context.error = ulp_error.data();
+  for (auto _ : state) {
+    for (uint32_t n = min_input; int32_t(n) < 0; n -= block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
+      }
+      std::fill(m.begin(), m.end(), std::nanf(""));
+      std::fill(e.begin(), e.end(), std::nanf(""));
+
+      extexp(block_size * sizeof(float), x.data(), m.data(), e.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+    for (uint32_t n = 0; n < max_input; n += block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::min<uint32_t>(n + i, max_input));
+      }
+      std::fill(m.begin(), m.end(), std::nanf(""));
+      std::fill(e.begin(), e.end(), std::nanf(""));
+
+      extexp(block_size * sizeof(float), x.data(), m.data(), e.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+  }
+
+  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
+}
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  BENCHMARK_CAPTURE(ExtExpError, avx512f_p5,
+                    xnn_math_f32_extexp__avx512f_p5,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(ExtExpError, avx2_p5,
+                    xnn_math_f32_extexp__avx2_p5,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#ifndef XNNPACK_BENCHMARK_NO_MAIN
+BENCHMARK_MAIN();
+#endif
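As a reading aid for the ComputeError routine in this new file, here is a minimal standalone sketch (not part of the patch) of the ULP metric it implements: the kernel emits an extended-exponent pair (m, e) meaning m * 2^e, ComputeError realigns m to the exponent of the double-precision reference with ldexp, and the mantissa difference is scaled by 2^24 because one ULP of a float mantissa in [0.5, 1) is 2^-24. The sample (m, e) values below are hypothetical.

#include <cmath>
#include <cstdio>

static double extexp_ulp_error(double x, float m, float e) {
  const double ref = std::exp(x);
  int ref_e;
  const double ref_m = std::frexp(ref, &ref_e);                  // ref = ref_m * 2^ref_e, ref_m in [0.5, 1)
  const double aligned = std::ldexp(double(m), int(e) - ref_e);  // bring m * 2^e to the reference exponent
  return std::abs(ref_m - aligned) * 0x1.0p+24;                  // one mantissa ULP in [0.5, 1) is 2^-24
}

int main() {
  // Hypothetical kernel output for x = 1.0: exp(1) ~= 0.679570457 * 2^2.
  std::printf("%g ULP\n", extexp_ulp_error(1.0, 0.6795705f, 2.0f));
  return 0;
}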
diff --git a/eval/f32-extexp.cc b/eval/f32-extexp.cc
deleted file mode 100644
index 32bd943..0000000
--- a/eval/f32-extexp.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <algorithm>
-#include <cfloat>
-#include <cmath>
-#include <functional>
-#include <random>
-#include <vector>
-
-#include <benchmark/benchmark.h>
-#include <fp16/fp16.h>
-
-#include <xnnpack/AlignedAllocator.h>
-#include <xnnpack/common.h>
-#include <xnnpack/math-stubs.h>
-
-
-static void ExpError(benchmark::State& state,
-  xnn_f32_ext_unary_math_function extexp,
-  size_t tile_size)
-{
-  // The smallest x for which exp(x) (double-precision) is normal (-0x1.6232BCp9f).
-  const uint32_t min_input = 0xC431195E;
-  // The largest x for which exp(x) (double-precision) is finite (0x1.62E42Ep9).
-  const uint32_t max_input = 0x44317217;
-  // Number of tiles in one block of inputs/outputs. Combining multiple tiles in a block reduce function call overhead.
-  const size_t num_tiles = 100;
-
-  double max_ulp_error = 0.0;
-  std::vector<float, AlignedAllocator<float, 64>> x(tile_size * num_tiles);
-  std::vector<float, AlignedAllocator<float, 64>> m(tile_size * num_tiles);
-  std::vector<float, AlignedAllocator<float, 64>> e(tile_size * num_tiles);
-  for (auto _ : state) {
-    for (uint32_t n = min_input; int32_t(n) < 0; n -= tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
-      }
-      std::fill(m.begin(), m.end(), std::nanf(""));
-      std::fill(e.begin(), e.end(), std::nanf(""));
-
-      extexp(tile_size * num_tiles * sizeof(float), x.data(), m.data(), e.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double y_ref = std::exp(double(x[i]));
-        int y_ref_e;
-        const double y_ref_m = std::frexp(y_ref, &y_ref_e);
-        const double ulp_error = std::abs(y_ref_m - std::ldexp(double(m[i]), int(e[i]) - y_ref_e)) * 0x1.0p+24;
-        max_ulp_error = std::max<double>(max_ulp_error, ulp_error);
-      }
-    }
-    for (uint32_t n = 0; n < max_input; n += tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::min<uint32_t>(n + i, max_input));
-      }
-      std::fill(m.begin(), m.end(), std::nanf(""));
-      std::fill(e.begin(), e.end(), std::nanf(""));
-
-      extexp(tile_size * num_tiles * sizeof(float), x.data(), m.data(), e.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double y_ref = std::exp(double(x[i]));
-        int y_ref_e;
-        const double y_ref_m = std::frexp(y_ref, &y_ref_e);
-        const double ulp_error = std::abs(y_ref_m - std::ldexp(double(m[i]), int(e[i]) - y_ref_e)) * 0x1.0p+24;
-        max_ulp_error = std::max<double>(max_ulp_error, ulp_error);
-      }
-    }
-  }
-
-  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
-}
-
-#if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  static void f32_extexp__avx512f_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_extexp__avx512f_p5, 16);
-  }
-  static void f32_extexp__avx2_p5(benchmark::State& state) {
-    ExpError(state, xnn_math_f32_extexp__avx2_p5, 8);
-  }
-
-  BENCHMARK(f32_extexp__avx512f_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_extexp__avx2_p5)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
-
-#ifndef XNNPACK_BENCHMARK_NO_MAIN
-BENCHMARK_MAIN();
-#endif
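The file removed above accumulated the ULP error in a serial per-element loop; its replacement, eval/f32-extexp-ulp.cc, hands that loop to a pthreadpool with 64-element tiles. Below is a minimal standalone sketch (not part of the patch) of that pthreadpool_parallelize_1d_tile_1d pattern, with a placeholder per-element task.

#include <cstddef>
#include <vector>

#include <pthreadpool.h>

struct Context {
  const float* data;
  float* out;
};

// Task callback: processes elements [start, start + range) of one tile.
static void Task(Context* ctx, size_t start, size_t range) {
  for (size_t i = start; i < start + range; i++) {
    ctx->out[i] = ctx->data[i] * 2.0f;  // placeholder per-element work
  }
}

int main() {
  std::vector<float> data(16384, 1.0f), out(16384);
  Context ctx{data.data(), out.data()};
  pthreadpool_t pool = pthreadpool_create(0 /* 0 = use all logical cores */);
  pthreadpool_parallelize_1d_tile_1d(
      pool,
      reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(Task),
      &ctx,
      data.size(), 64 /* tile size */, 0 /* flags */);
  pthreadpool_destroy(pool);
  return 0;
}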
diff --git a/eval/f32-sigmoid-ulp.cc b/eval/f32-sigmoid-ulp.cc
new file mode 100644
index 0000000..b4d3dc7
--- /dev/null
+++ b/eval/f32-sigmoid-ulp.cc
@@ -0,0 +1,541 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <cfloat>
+#include <cmath>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <vector>
+
+#include <cpuinfo.h>
+#include <pthreadpool.h>
+
+#include <benchmark/benchmark.h>
+#include <fp16/fp16.h>
+
+#include "bench/utils.h"
+#include <xnnpack/AlignedAllocator.h>
+#include <xnnpack/common.h>
+#include <xnnpack/math-stubs.h>
+
+
+struct ComputeErrorContext {
+  const float* input;
+  const float* output;
+  float* error;
+};
+
+static void ComputeError(
+  struct ComputeErrorContext* context,
+  size_t start,
+  size_t range)
+{
+  const float* input = context->input;
+  const float* output = context->output;
+  float* error = context->error;
+  for (size_t i = start; i < start + range; i++) {
+    const double input_val = input[i];
+    double output_ref = 0.0;
+    if (input_val < 0.0) {
+      const double exp_val = std::exp(input_val);
+      output_ref = exp_val / (1.0 + exp_val);
+    } else {
+      output_ref = 1.0 / (1.0 + std::exp(-input_val));
+    }
+    const double abs_error = std::abs(output_ref - double(output[i]));
+    const float output_abs = std::abs(output_ref);
+    const float output_ulp = fp32_from_bits(fp32_to_bits(output_abs) + 1) - output_abs;
+    error[i] = float(abs_error / output_ulp);
+  }
+}
+
+static void SigmoidError(benchmark::State& state,
+  xnn_f32_unary_math_function sigmoid,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
+{
+  if (!cpuinfo_initialize()) {
+    state.SkipWithError("failed cpuinfo init");
+    return;
+  }
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  // The smallest x for which sigmoidf(x) is normalized (-0x1.5D589Ep+6f).
+  const uint32_t min_input = 0xC2AEAC4F;
+  // The largest x for which sigmoidf(x) is not 1.0f (0x1.154244p+4f).
+  const uint32_t max_input = 0x418AA122;
+  // Number of elements in one block of inputs/outputs.
+  // Combining multiple elements in a block reduces function call overhead.
+  const size_t block_size = 16384;
+  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
+  const size_t tile_size = 64;
+
+  uint32_t num_threads = cpuinfo_get_cores_count();
+  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+    // Use all cores except for the least performant cluster
+    if (cpuinfo_get_clusters_count() > 1) {
+      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
+    }
+  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
+    pthreadpool_create(num_threads), pthreadpool_destroy);
+
+  std::vector<float, AlignedAllocator<float, 64>> x(block_size);
+  std::vector<float, AlignedAllocator<float, 64>> y(block_size);
+  std::vector<float> ulp_error(block_size);
+  float max_ulp_error = 0.0f;
+
+  ComputeErrorContext context;
+  context.input = x.data();
+  context.output = y.data();
+  context.error = ulp_error.data();
+  for (auto _ : state) {
+    for (uint32_t n = min_input; int32_t(n) < 0; n -= block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
+      }
+      std::fill(y.begin(), y.end(), std::nanf(""));
+
+      sigmoid(block_size * sizeof(float), x.data(), y.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+    for (uint32_t n = 0; n < max_input; n += block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::min<uint32_t>(n + i, max_input));
+      }
+      std::fill(y.begin(), y.end(), std::nanf(""));
+
+      sigmoid(block_size * sizeof(float), x.data(), y.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+  }
+
+  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
+}
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_lut64_p2_nr2recps,
+                    xnn_math_f32_sigmoid__neonfma_rr1_lut64_p2_nr2recps,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_lut64_p2_nr1recps1fma,
+                    xnn_math_f32_sigmoid__neonfma_rr1_lut64_p2_nr1recps1fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_lut64_p2_nr2fma,
+                    xnn_math_f32_sigmoid__neonfma_rr1_lut64_p2_nr2fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_lut64_p2_nr2recps,
+                    xnn_math_f32_sigmoid__neonfma_rr2_lut64_p2_nr2recps,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_lut64_p2_nr1recps1fma,
+                    xnn_math_f32_sigmoid__neonfma_rr2_lut64_p2_nr1recps1fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_lut64_p2_nr2fma,
+                    xnn_math_f32_sigmoid__neonfma_rr2_lut64_p2_nr2fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_lut2048_p1_nr2recps,
+                    xnn_math_f32_sigmoid__neonfma_rr1_lut2048_p1_nr2recps,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_lut2048_p1_nr1recps1fma,
+                    xnn_math_f32_sigmoid__neonfma_rr1_lut2048_p1_nr1recps1fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_lut2048_p1_nr2fma,
+                    xnn_math_f32_sigmoid__neonfma_rr1_lut2048_p1_nr2fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_lut2048_p1_nr2recps,
+                    xnn_math_f32_sigmoid__neonfma_rr2_lut2048_p1_nr2recps,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_lut2048_p1_nr1recps1fma,
+                    xnn_math_f32_sigmoid__neonfma_rr2_lut2048_p1_nr1recps1fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_lut2048_p1_nr2fma,
+                    xnn_math_f32_sigmoid__neonfma_rr2_lut2048_p1_nr2fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_p5_nr2recps,
+                    xnn_math_f32_sigmoid__neonfma_rr1_p5_nr2recps,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_p5_nr1recps1fma,
+                    xnn_math_f32_sigmoid__neonfma_rr1_p5_nr1recps1fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_p5_nr2fma,
+                    xnn_math_f32_sigmoid__neonfma_rr1_p5_nr2fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_p5_nr2recps,
+                    xnn_math_f32_sigmoid__neonfma_rr2_p5_nr2recps,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_p5_nr1recps1fma,
+                    xnn_math_f32_sigmoid__neonfma_rr2_p5_nr1recps1fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_p5_nr2fma,
+                    xnn_math_f32_sigmoid__neonfma_rr2_p5_nr2fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(SigmoidError, neon_rr2_lut64_p2_nr2recps,
+                    xnn_math_f32_sigmoid__neon_rr2_lut64_p2_nr2recps,
+                    benchmark::utils::CheckNEON)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neon_rr2_lut2048_p1_nr2recps,
+                    xnn_math_f32_sigmoid__neon_rr2_lut2048_p1_nr2recps,
+                    benchmark::utils::CheckNEON)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neon_rr2_p5_nr2recps,
+                    xnn_math_f32_sigmoid__neon_rr2_p5_nr2recps,
+                    benchmark::utils::CheckNEON)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+#if XNN_ARCH_ARM64
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_lut2048_p1_div,
+                    xnn_math_f32_sigmoid__neonfma_rr1_lut2048_p1_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_lut2048_p1_div,
+                    xnn_math_f32_sigmoid__neonfma_rr2_lut2048_p1_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_lut64_p2_div,
+                    xnn_math_f32_sigmoid__neonfma_rr1_lut64_p2_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_lut64_p2_div,
+                    xnn_math_f32_sigmoid__neonfma_rr2_lut64_p2_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr1_p5_div,
+                    xnn_math_f32_sigmoid__neonfma_rr1_p5_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, neonfma_rr2_p5_div,
+                    xnn_math_f32_sigmoid__neonfma_rr2_p5_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_ARM64
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut16_p3_perm_scalef_nr1fma,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut16_p3_perm_scalef_nr1fma1adj,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut16_p3_perm_scalef_div,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_div,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut16_p3_perm_scalef_nr1fma,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut16_p3_perm_scalef_nr1fma1adj,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut16_p3_perm_scalef_div,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_div,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut32_p2_perm2_scalef_nr1fma,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut32_p2_perm2_scalef_nr1fma1adj,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut32_p2_perm2_scalef_div,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_div,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut32_p2_perm2_scalef_nr1fma,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut32_p2_perm2_scalef_nr1fma1adj,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut32_p2_perm2_scalef_div,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_div,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut64_p2_gather_scalef_nr1fma,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut64_p2_gather_scalef_nr1fma1adj,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_lut64_p2_gather_scalef_div,
+                    xnn_math_f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_div,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut64_p2_gather_scalef_nr1fma,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut64_p2_gather_scalef_nr1fma1adj,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_lut64_p2_gather_scalef_div,
+                    xnn_math_f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_div,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_p5_scalef_nr1fma,
+                    xnn_math_f32_sigmoid__avx512f_rr1_p5_scalef_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_p5_scalef_nr1fma1adj,
+                    xnn_math_f32_sigmoid__avx512f_rr1_p5_scalef_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr1_p5_scalef_div,
+                    xnn_math_f32_sigmoid__avx512f_rr1_p5_scalef_div,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_p5_scalef_nr1fma,
+                    xnn_math_f32_sigmoid__avx512f_rr2_p5_scalef_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_p5_scalef_nr1fma1adj,
+                    xnn_math_f32_sigmoid__avx512f_rr2_p5_scalef_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx512f_rr2_p5_scalef_div,
+                    xnn_math_f32_sigmoid__avx512f_rr2_p5_scalef_div,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_lut64_p2_gather_nr1fma,
+                    xnn_math_f32_sigmoid__avx2_rr1_lut64_p2_gather_nr1fma,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_lut64_p2_gather_nr2fma,
+                    xnn_math_f32_sigmoid__avx2_rr1_lut64_p2_gather_nr2fma,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_lut64_p2_gather_nr2fma1adj,
+                    xnn_math_f32_sigmoid__avx2_rr1_lut64_p2_gather_nr2fma1adj,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_lut64_p2_gather_div,
+                    xnn_math_f32_sigmoid__avx2_rr1_lut64_p2_gather_div,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr2_lut64_p2_gather_nr1fma,
+                    xnn_math_f32_sigmoid__avx2_rr2_lut64_p2_gather_nr1fma,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr2_lut64_p2_gather_nr2fma,
+                    xnn_math_f32_sigmoid__avx2_rr2_lut64_p2_gather_nr2fma,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr2_lut64_p2_gather_nr2fma1adj,
+                    xnn_math_f32_sigmoid__avx2_rr2_lut64_p2_gather_nr2fma1adj,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr2_lut64_p2_gather_div,
+                    xnn_math_f32_sigmoid__avx2_rr2_lut64_p2_gather_div,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_p5_nr1fma,
+                    xnn_math_f32_sigmoid__avx2_rr1_p5_nr1fma,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_p5_nr2fma,
+                    xnn_math_f32_sigmoid__avx2_rr1_p5_nr2fma,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_p5_div,
+                    xnn_math_f32_sigmoid__avx2_rr1_p5_div,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr2_p5_nr1fma,
+                    xnn_math_f32_sigmoid__avx2_rr2_p5_nr1fma,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr2_p5_nr2fma,
+                    xnn_math_f32_sigmoid__avx2_rr2_p5_nr2fma,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx2_rr2_p5_div,
+                    xnn_math_f32_sigmoid__avx2_rr2_p5_div,
+                    benchmark::utils::CheckAVX2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(SigmoidError, avx_rr2_lut64_p2_div,
+                    xnn_math_f32_sigmoid__avx_rr2_lut64_p2_div,
+                    benchmark::utils::CheckAVX)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx_rr2_p5_nr1,
+                    xnn_math_f32_sigmoid__avx_rr2_p5_nr1,
+                    benchmark::utils::CheckAVX)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx_rr2_p5_nr2,
+                    xnn_math_f32_sigmoid__avx_rr2_p5_nr2,
+                    benchmark::utils::CheckAVX)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, avx_rr2_p5_div,
+                    xnn_math_f32_sigmoid__avx_rr2_p5_div,
+                    benchmark::utils::CheckAVX)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(SigmoidError, sse2_rr2_lut64_p2_nr1,
+                    xnn_math_f32_sigmoid__sse2_rr2_lut64_p2_nr1)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, sse2_rr2_lut64_p2_nr2,
+                    xnn_math_f32_sigmoid__sse2_rr2_lut64_p2_nr2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, sse2_rr2_lut64_p2_div,
+                    xnn_math_f32_sigmoid__sse2_rr2_lut64_p2_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, sse2_rr2_p5_nr1,
+                    xnn_math_f32_sigmoid__sse2_rr2_p5_nr1)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, sse2_rr2_p5_nr2,
+                    xnn_math_f32_sigmoid__sse2_rr2_p5_nr2)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, sse2_rr2_p5_div,
+                    xnn_math_f32_sigmoid__sse2_rr2_p5_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#if XNN_ARCH_WASMSIMD
+  BENCHMARK_CAPTURE(SigmoidError, wasmsimd_rr2_lut64_p2_div,
+                    xnn_math_f32_sigmoid__wasmsimd_rr2_lut64_p2_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SigmoidError, wasmsimd_rr2_p5_div,
+                    xnn_math_f32_sigmoid__wasmsimd_rr2_p5_div)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_WASMSIMD
+
+BENCHMARK_CAPTURE(SigmoidError, scalar_rr2_lut64_p2_div,
+                  xnn_math_f32_sigmoid__scalar_rr2_lut64_p2_div)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(SigmoidError, scalar_rr2_lut2048_p1_div,
+                  xnn_math_f32_sigmoid__scalar_rr2_lut2048_p1_div)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+BENCHMARK_CAPTURE(SigmoidError, scalar_rr2_p5_div,
+                  xnn_math_f32_sigmoid__scalar_rr2_p5_div)
+  ->Unit(benchmark::kMillisecond)
+  ->Iterations(1);
+
+#ifndef XNNPACK_BENCHMARK_NO_MAIN
+BENCHMARK_MAIN();
+#endif
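One detail worth calling out in the ComputeError above: the double-precision reference branches on the sign of the input so that std::exp is only ever called with a non-positive argument, which keeps it from overflowing for inputs of large magnitude. A minimal standalone sketch (not part of the patch) of that reference:

#include <cmath>
#include <cstdio>

static double sigmoid_ref(double x) {
  if (x < 0.0) {
    const double e = std::exp(x);     // x <= 0, so e is in (0, 1]
    return e / (1.0 + e);
  }
  return 1.0 / (1.0 + std::exp(-x));  // -x <= 0, same property
}

int main() {
  std::printf("%g %g %g\n", sigmoid_ref(-87.0), sigmoid_ref(0.0), sigmoid_ref(17.0));
  return 0;
}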
diff --git a/eval/f32-sigmoid.cc b/eval/f32-sigmoid.cc
deleted file mode 100644
index 1d2d00a..0000000
--- a/eval/f32-sigmoid.cc
+++ /dev/null
@@ -1,446 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <algorithm>
-#include <cfloat>
-#include <cmath>
-#include <functional>
-#include <random>
-#include <vector>
-
-#include <benchmark/benchmark.h>
-#include <fp16/fp16.h>
-
-#include <xnnpack/AlignedAllocator.h>
-#include <xnnpack/common.h>
-#include <xnnpack/math-stubs.h>
-
-
-static void SigmoidError(benchmark::State& state,
-  xnn_f32_unary_math_function sigmoid,
-  size_t tile_size)
-{
-  // The smallest x for which sigmoidf(x) is normalized (-0x1.5D589Ep+6f).
-  const uint32_t min_input = 0xC2AEAC4F;
-  // The largest x for which sigmoidf(x) is not 1.0f (0x1.154244p+4f).
-  const uint32_t max_input = 0x418AA122;
-  // Number of tiles in one block of inputs/outputs. Combining multiple tiles in a block reduce function call overhead.
-  const size_t num_tiles = 100;
-
-  double max_ulp_error = 0.0;
-  std::vector<float, AlignedAllocator<float, 64>> x(tile_size * num_tiles);
-  std::vector<float, AlignedAllocator<float, 64>> y(tile_size * num_tiles);
-  for (auto _ : state) {
-    for (uint32_t n = min_input; int32_t(n) < 0; n -= tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::max<uint32_t>(n - i, 0x80000000));
-      }
-      std::fill(y.begin(), y.end(), std::nanf(""));
-
-      sigmoid(tile_size * num_tiles * sizeof(float), x.data(), y.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double e_ref = std::exp(double(x[i]));
-        const double y_ref = e_ref / (e_ref + 1.0);
-        const double abs_error = std::abs(y_ref - double(y[i]));
-        const float y_abs = std::abs(y_ref);
-        const float y_ulp = fp32_from_bits(fp32_to_bits(y_abs) + 1) - y_abs;
-        max_ulp_error = std::max<double>(max_ulp_error, abs_error / y_ulp);
-      }
-    }
-    for (uint32_t n = 0; n < max_input; n += tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::min<uint32_t>(n + i, max_input));
-      }
-      std::fill(y.begin(), y.end(), std::nanf(""));
-
-      sigmoid(tile_size * num_tiles * sizeof(float), x.data(), y.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double y_ref = 1.0 / (1.0 + std::exp(-double(x[i])));
-        const double abs_error = std::abs(y_ref - double(y[i]));
-        const float y_abs = std::abs(y_ref);
-        const float y_ulp = fp32_from_bits(fp32_to_bits(y_abs) + 1) - y_abs;
-        max_ulp_error = std::max<double>(max_ulp_error, abs_error / y_ulp);
-      }
-    }
-  }
-
-  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
-}
-
-#if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  static void f32_sigmoid__neon_frac_p9_p10_nr1recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neon_frac_p9_p10_nr1recps, 4);
-  }
-
-  static void f32_sigmoid__neon_rr2_lut2048_p1_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neon_rr2_lut2048_p1_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_lut2048_p1_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_lut2048_p1_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_lut2048_p1_nr1recps1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_lut2048_p1_nr1recps1fma, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_lut2048_p1_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_lut2048_p1_nr2fma, 4);
-  }
-
-  static void f32_sigmoid__neon_rr2_lut64_p2_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neon_rr2_lut64_p2_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_lut64_p2_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_lut64_p2_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_lut64_p2_nr1recps1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_lut64_p2_nr1recps1fma, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_lut64_p2_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_lut64_p2_nr2fma, 4);
-  }
-
-  static void f32_sigmoid__neon_rr2_p5_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neon_rr2_p5_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_p5_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_p5_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_p5_nr1recps1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_p5_nr1recps1fma, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_p5_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_p5_nr2fma, 4);
-  }
-
-  static void f32_sigmoid__neon_rr1_lut2048_p1_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neon_rr1_lut2048_p1_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_lut2048_p1_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_lut2048_p1_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_lut2048_p1_nr1recps1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_lut2048_p1_nr1recps1fma, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_lut2048_p1_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_lut2048_p1_nr2fma, 4);
-  }
-
-  static void f32_sigmoid__neon_rr1_lut64_p2_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neon_rr1_lut64_p2_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_lut64_p2_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_lut64_p2_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_lut64_p2_nr1recps1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_lut64_p2_nr1recps1fma, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_lut64_p2_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_lut64_p2_nr2fma, 4);
-  }
-
-  static void f32_sigmoid__neon_rr1_p5_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neon_rr1_p5_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_p5_nr2recps(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_p5_nr2recps, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_p5_nr1recps1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_p5_nr1recps1fma, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_p5_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_p5_nr2fma, 4);
-  }
-
-  BENCHMARK(f32_sigmoid__neon_frac_p9_p10_nr1recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-
-  BENCHMARK(f32_sigmoid__neon_rr2_lut2048_p1_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_lut2048_p1_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_lut2048_p1_nr1recps1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_lut2048_p1_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-
-  BENCHMARK(f32_sigmoid__neon_rr2_lut64_p2_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_lut64_p2_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_lut64_p2_nr1recps1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_lut64_p2_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-
-  BENCHMARK(f32_sigmoid__neon_rr2_p5_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_p5_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_p5_nr1recps1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_p5_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-
-  BENCHMARK(f32_sigmoid__neon_rr1_lut2048_p1_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_lut2048_p1_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_lut2048_p1_nr1recps1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_lut2048_p1_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-
-  BENCHMARK(f32_sigmoid__neon_rr1_lut64_p2_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_lut64_p2_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_lut64_p2_nr1recps1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_lut64_p2_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-
-  BENCHMARK(f32_sigmoid__neon_rr1_p5_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_p5_nr2recps)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_p5_nr1recps1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_p5_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
-
-#if XNN_ARCH_ARM64
-  static void f32_sigmoid__neonfma_rr2_lut2048_p1_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_lut2048_p1_div, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_lut64_p2_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_lut64_p2_div, 4);
-  }
-  static void f32_sigmoid__neonfma_rr2_p5_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr2_p5_div, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_lut2048_p1_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_lut2048_p1_div, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_lut64_p2_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_lut64_p2_div, 4);
-  }
-  static void f32_sigmoid__neonfma_rr1_p5_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__neonfma_rr1_p5_div, 4);
-  }
-
-  BENCHMARK(f32_sigmoid__neonfma_rr2_lut2048_p1_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_lut64_p2_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr2_p5_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_lut2048_p1_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_lut64_p2_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__neonfma_rr1_p5_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_ARM64
-
-#if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  static void f32_sigmoid__avx512f_rr2_p5_scalef_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_p5_scalef_div, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_p5_scalef_nr1fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_p5_scalef_nr1fma1adj, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_p5_scalef_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_p5_scalef_nr1fma, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_p5_scalef_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_p5_scalef_div, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_p5_scalef_nr1fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_p5_scalef_nr1fma1adj, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_p5_scalef_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_p5_scalef_nr1fma, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_div, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_nr1fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_nr1fma1adj, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_nr1fma, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_div, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_nr1fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_nr1fma1adj, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_nr1fma, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_div, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma1adj, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_div, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_nr1fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_nr1fma1adj, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_nr1fma, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_div, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_nr1fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_nr1fma1adj, 16);
-  }
-  static void f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_nr1fma, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_div, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_nr1fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_nr1fma1adj, 16);
-  }
-  static void f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_nr1fma, 16);
-  }
-
-  static void f32_sigmoid__avx2_rr2_p5_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr2_p5_div, 8);
-  }
-  static void f32_sigmoid__avx2_rr2_p5_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr2_p5_nr2fma, 8);
-  }
-  static void f32_sigmoid__avx2_rr2_p5_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr2_p5_nr1fma, 8);
-  }
-  static void f32_sigmoid__avx2_rr1_p5_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr1_p5_div, 8);
-  }
-  static void f32_sigmoid__avx2_rr1_p5_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr1_p5_nr2fma, 8);
-  }
-  static void f32_sigmoid__avx2_rr1_p5_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr1_p5_nr1fma, 8);
-  }
-
-  static void f32_sigmoid__avx2_rr2_lut64_p2_gather_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr2_lut64_p2_gather_div, 8);
-  }
-  static void f32_sigmoid__avx2_rr2_lut64_p2_gather_nr2fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr2_lut64_p2_gather_nr2fma1adj, 8);
-  }
-  static void f32_sigmoid__avx2_rr2_lut64_p2_gather_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr2_lut64_p2_gather_nr2fma, 8);
-  }
-  static void f32_sigmoid__avx2_rr2_lut64_p2_gather_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr2_lut64_p2_gather_nr1fma, 8);
-  }
-  static void f32_sigmoid__avx2_rr1_lut64_p2_gather_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr1_lut64_p2_gather_div, 8);
-  }
-  static void f32_sigmoid__avx2_rr1_lut64_p2_gather_nr2fma1adj(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr1_lut64_p2_gather_nr2fma1adj, 8);
-  }
-  static void f32_sigmoid__avx2_rr1_lut64_p2_gather_nr2fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr1_lut64_p2_gather_nr2fma, 8);
-  }
-  static void f32_sigmoid__avx2_rr1_lut64_p2_gather_nr1fma(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx2_rr1_lut64_p2_gather_nr1fma, 8);
-  }
-
-  static void f32_sigmoid__avx_rr2_p5_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx_rr2_p5_div, 8);
-  }
-  static void f32_sigmoid__avx_rr2_p5_nr2(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx_rr2_p5_nr2, 8);
-  }
-  static void f32_sigmoid__avx_rr2_p5_nr1(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx_rr2_p5_nr1, 8);
-  }
-  static void f32_sigmoid__avx_rr2_lut64_p2_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__avx_rr2_lut64_p2_div, 8);
-  }
-
-  static void f32_sigmoid__sse2_rr2_p5_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__sse2_rr2_p5_div, 4);
-  }
-  static void f32_sigmoid__sse2_rr2_p5_nr2(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__sse2_rr2_p5_nr2, 4);
-  }
-  static void f32_sigmoid__sse2_rr2_p5_nr1(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__sse2_rr2_p5_nr1, 4);
-  }
-  static void f32_sigmoid__sse2_rr2_lut64_p2_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__sse2_rr2_lut64_p2_div, 4);
-  }
-  static void f32_sigmoid__sse2_rr2_lut64_p2_nr2(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__sse2_rr2_lut64_p2_nr2, 4);
-  }
-  static void f32_sigmoid__sse2_rr2_lut64_p2_nr1(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__sse2_rr2_lut64_p2_nr1, 4);
-  }
-
-  BENCHMARK(f32_sigmoid__avx512f_rr2_p5_scalef_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_p5_scalef_nr1fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_p5_scalef_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_p5_scalef_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_p5_scalef_nr1fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_p5_scalef_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_nr1fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut16_p3_perm_scalef_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_nr1fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut16_p3_perm_scalef_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_nr1fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut32_p2_perm2_scalef_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_nr1fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr2_lut64_p2_gather_scalef_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_nr1fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx512f_rr1_lut64_p2_gather_scalef_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr2_p5_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr2_p5_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr2_p5_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr1_p5_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr1_p5_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr1_p5_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr2_lut64_p2_gather_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr2_lut64_p2_gather_nr2fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr2_lut64_p2_gather_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr2_lut64_p2_gather_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr1_lut64_p2_gather_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr1_lut64_p2_gather_nr2fma1adj)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr1_lut64_p2_gather_nr2fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx2_rr1_lut64_p2_gather_nr1fma)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx_rr2_p5_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx_rr2_p5_nr2)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx_rr2_p5_nr1)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__avx_rr2_lut64_p2_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__sse2_rr2_p5_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__sse2_rr2_p5_nr2)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__sse2_rr2_p5_nr1)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__sse2_rr2_lut64_p2_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__sse2_rr2_lut64_p2_nr2)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__sse2_rr2_lut64_p2_nr1)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
-
-#if XNN_ARCH_WASMSIMD
-  static void f32_sigmoid__wasmsimd_rr2_lut64_p2_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__wasmsimd_rr2_lut64_p2_div, 4);
-  }
-  static void f32_sigmoid__wasmsimd_rr2_p5_div(benchmark::State& state) {
-    SigmoidError(state, xnn_math_f32_sigmoid__wasmsimd_rr2_p5_div, 4);
-  }
-
-  BENCHMARK(f32_sigmoid__wasmsimd_rr2_lut64_p2_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK(f32_sigmoid__wasmsimd_rr2_p5_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_WASMSIMD
-
-static void f32_sigmoid__scalar_rr2_lut2048_p1_div(benchmark::State& state) {
-  SigmoidError(state, xnn_math_f32_sigmoid__scalar_rr2_lut2048_p1_div, 1);
-}
-static void f32_sigmoid__scalar_rr2_lut64_p2_div(benchmark::State& state) {
-  SigmoidError(state, xnn_math_f32_sigmoid__scalar_rr2_lut64_p2_div, 1);
-}
-static void f32_sigmoid__scalar_rr2_p5_div(benchmark::State& state) {
-  SigmoidError(state, xnn_math_f32_sigmoid__scalar_rr2_p5_div, 1);
-}
-
-BENCHMARK(f32_sigmoid__scalar_rr2_lut2048_p1_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_sigmoid__scalar_rr2_lut64_p2_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-BENCHMARK(f32_sigmoid__scalar_rr2_p5_div)->Unit(benchmark::kMillisecond)->Iterations(1);
-
-#ifndef XNNPACK_BENCHMARK_NO_MAIN
-BENCHMARK_MAIN();
-#endif
diff --git a/eval/f32-sqrt-ulp.cc b/eval/f32-sqrt-ulp.cc
new file mode 100644
index 0000000..43ec9ee
--- /dev/null
+++ b/eval/f32-sqrt-ulp.cc
@@ -0,0 +1,206 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <cfloat>
+#include <cmath>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <vector>
+
+#include <cpuinfo.h>
+#include <pthreadpool.h>
+
+#include <benchmark/benchmark.h>
+#include <fp16/fp16.h>
+
+#include "bench/utils.h"
+#include <xnnpack/AlignedAllocator.h>
+#include <xnnpack/common.h>
+#include <xnnpack/math-stubs.h>
+
+
+struct ComputeErrorContext {
+  const float* input;
+  const float* output;
+  float* error;
+};
+
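+// For each element in [start, start + range), compute the error of the tested
+// output against a double-precision sqrt reference, expressed in ULPs of the
+// reference result (one ULP = gap between float(|sqrt_ref|) and the next larger float).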
+static void ComputeError(
+  struct ComputeErrorContext* context,
+  size_t start,
+  size_t range)
+{
+  const float* input = context->input;
+  const float* output = context->output;
+  float* error = context->error;
+  for (size_t i = start; i < start + range; i++) {
+    const double output_ref = std::sqrt(double(input[i]));
+    const double abs_error = std::abs(output_ref - double(output[i]));
+    const float output_abs = std::abs(output_ref);
+    const float output_ulp = fp32_from_bits(fp32_to_bits(output_abs) + 1) - output_abs;
+    error[i] = float(abs_error / output_ulp);
+  }
+}
+
+static void SqrtError(benchmark::State& state,
+  xnn_f32_unary_math_function sqrt,
+  benchmark::utils::IsaCheckFunction isa_check = nullptr)
+{
+  if (!cpuinfo_initialize()) {
+    state.SkipWithError("failed cpuinfo init");
+    return;
+  }
+  if (isa_check && !isa_check(state)) {
+    return;
+  }
+
+  const uint32_t min_input = 0x3F800000;
+  const uint32_t max_input = 0x41800000;
+  // Number of elements in one block of inputs/outputs.
+  // Combining multiple elements in a block reduces function call overhead.
+  const size_t block_size = 16384;
+  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
+  const size_t tile_size = 64;
+
+  uint32_t num_threads = cpuinfo_get_cores_count();
+  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+    // Use all cores except for the least performant cluster
+    if (cpuinfo_get_clusters_count() > 1) {
+      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
+    }
+  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
+    pthreadpool_create(num_threads), pthreadpool_destroy);
+
+  std::vector<float, AlignedAllocator<float, 64>> x(block_size);
+  std::vector<float, AlignedAllocator<float, 64>> y(block_size);
+  std::vector<float> ulp_error(block_size);
+  float max_ulp_error = 0.0f;
+
+  ComputeErrorContext context;
+  context.input = x.data();
+  context.output = y.data();
+  context.error = ulp_error.data();
+  for (auto _ : state) {
+    for (uint32_t n = min_input; n < max_input; n += block_size) {
+      for (uint32_t i = 0; i < block_size; i++) {
+        x[i] = fp32_from_bits(std::min<uint32_t>(n + i, max_input));
+      }
+      std::fill(y.begin(), y.end(), std::nanf(""));
+
+      sqrt(block_size * sizeof(float), x.data(), y.data());
+
+      pthreadpool_parallelize_1d_tile_1d(
+          threadpool.get(),
+          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
+          static_cast<void*>(&context),
+          block_size, tile_size, 0 /* flags */);
+
+      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
+        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
+    }
+  }
+
+  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
+}
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  BENCHMARK_CAPTURE(SqrtError, neonfma_nr1fma,
+                    xnn_math_f32_sqrt__neonfma_nr1fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, neonfma_nr2fma,
+                    xnn_math_f32_sqrt__neonfma_nr2fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, neonfma_nr3fma,
+                    xnn_math_f32_sqrt__neonfma_nr3fma,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, neonfma_nr2fma1adj,
+                    xnn_math_f32_sqrt__neonfma_nr2fma1adj,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, neonfma_nr1rsqrts1fma1adj,
+                    xnn_math_f32_sqrt__neonfma_nr1rsqrts1fma1adj,
+                    benchmark::utils::CheckNEONFMA)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(SqrtError, neon_nr1rsqrts,
+                    xnn_math_f32_sqrt__neon_nr1rsqrts,
+                    benchmark::utils::CheckNEON)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, neon_nr2rsqrts,
+                    xnn_math_f32_sqrt__neon_nr2rsqrts,
+                    benchmark::utils::CheckNEON)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, neon_nr3rsqrts,
+                    xnn_math_f32_sqrt__neon_nr3rsqrts,
+                    benchmark::utils::CheckNEON)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  BENCHMARK_CAPTURE(SqrtError, avx512f_nr1fma,
+                    xnn_math_f32_sqrt__avx512f_nr1fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, avx512f_nr2fma,
+                    xnn_math_f32_sqrt__avx512f_nr2fma,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, avx512f_nr1fma1adj,
+                    xnn_math_f32_sqrt__avx512f_nr1fma1adj,
+                    benchmark::utils::CheckAVX512F)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(SqrtError, fma3_nr1fma,
+                    xnn_math_f32_sqrt__fma3_nr1fma,
+                    benchmark::utils::CheckFMA3)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, fma3_nr2fma,
+                    xnn_math_f32_sqrt__fma3_nr2fma,
+                    benchmark::utils::CheckFMA3)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, fma3_nr1fma1adj,
+                    xnn_math_f32_sqrt__fma3_nr1fma1adj,
+                    benchmark::utils::CheckFMA3)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+
+  BENCHMARK_CAPTURE(SqrtError, sse_nr1mac,
+                    xnn_math_f32_sqrt__sse_nr1mac)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, sse_nr2mac,
+                    xnn_math_f32_sqrt__sse_nr2mac)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+  BENCHMARK_CAPTURE(SqrtError, sse_hh1mac,
+                    xnn_math_f32_sqrt__sse_hh1mac)
+    ->Unit(benchmark::kMillisecond)
+    ->Iterations(1);
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#ifndef XNNPACK_BENCHMARK_NO_MAIN
+BENCHMARK_MAIN();
+#endif
diff --git a/eval/f32-sqrt.cc b/eval/f32-sqrt.cc
deleted file mode 100644
index 9112dd3..0000000
--- a/eval/f32-sqrt.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <algorithm>
-#include <cfloat>
-#include <cmath>
-#include <functional>
-#include <random>
-#include <vector>
-
-#include <benchmark/benchmark.h>
-#include <fp16/fp16.h>
-
-#include "bench/utils.h"
-#include <xnnpack/AlignedAllocator.h>
-#include <xnnpack/common.h>
-#include <xnnpack/math-stubs.h>
-
-
-static void SqrtError(benchmark::State& state,
-  xnn_f32_unary_math_function sqrt,
-  size_t tile_size,
-  benchmark::utils::IsaCheckFunction isa_check = nullptr)
-{
-  if (isa_check && !isa_check(state)) {
-    return;
-  }
-
-  const uint32_t min_input = 0x3F800000;
-  const uint32_t max_input = 0x41800000;
-  // Number of tiles in one block of inputs/outputs. Combining multiple tiles in a block reduce function call overhead.
-  const size_t num_tiles = 100;
-
-  double max_ulp_error = 0.0;
-  std::vector<float, AlignedAllocator<float, 64>> x(tile_size * num_tiles);
-  std::vector<float, AlignedAllocator<float, 64>> y(tile_size * num_tiles);
-  for (auto _ : state) {
-    for (uint32_t n = min_input; n < max_input; n += tile_size * num_tiles) {
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        x[i] = fp32_from_bits(std::min<uint32_t>(n + i, max_input));
-      }
-      std::fill(y.begin(), y.end(), std::nanf(""));
-
-      sqrt(tile_size * num_tiles * sizeof(float), x.data(), y.data());
-
-      for (uint32_t i = 0; i < tile_size * num_tiles; i++) {
-        const double y_ref = std::sqrt(double(x[i]));
-        const double abs_error = std::abs(y_ref - double(y[i]));
-        const float y_abs = std::abs(y_ref);
-        const float y_ulp = fp32_from_bits(fp32_to_bits(y_abs) + 1) - y_abs;
-        max_ulp_error = std::max<double>(max_ulp_error, abs_error / y_ulp);
-      }
-    }
-  }
-
-  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
-}
-
-#if XNN_ARCH_X86 || XNN_ARCH_X86_64
-  BENCHMARK_CAPTURE(SqrtError, sse_nr1mac, xnn_math_f32_sqrt__sse_nr1mac, 4)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, sse_nr2mac, xnn_math_f32_sqrt__sse_nr2mac, 4)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, sse_hh1mac, xnn_math_f32_sqrt__sse_hh1mac, 4)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, fma3_nr1fma, xnn_math_f32_sqrt__fma3_nr1fma, 8, benchmark::utils::CheckFMA3)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, fma3_nr2fma, xnn_math_f32_sqrt__fma3_nr2fma, 8, benchmark::utils::CheckFMA3)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, fma3_nr1fma1adj, xnn_math_f32_sqrt__fma3_nr1fma1adj, 8, benchmark::utils::CheckFMA3)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, avx512f_nr1fma, xnn_math_f32_sqrt__avx512f_nr1fma, 16, benchmark::utils::CheckAVX512F)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, avx512f_nr2fma, xnn_math_f32_sqrt__avx512f_nr2fma, 16, benchmark::utils::CheckAVX512F)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, avx512f_nr1fma1adj, xnn_math_f32_sqrt__avx512f_nr1fma1adj, 16, benchmark::utils::CheckAVX512F)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
-
-#if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  BENCHMARK_CAPTURE(SqrtError, neon_nr1rsqrts, xnn_math_f32_sqrt__neon_nr1rsqrts, 4, benchmark::utils::CheckNEON)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, neon_nr2rsqrts, xnn_math_f32_sqrt__neon_nr2rsqrts, 4, benchmark::utils::CheckNEON)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, neon_nr3rsqrts, xnn_math_f32_sqrt__neon_nr3rsqrts, 4, benchmark::utils::CheckNEON)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, neonfma_nr1fma, xnn_math_f32_sqrt__neonfma_nr1fma, 4, benchmark::utils::CheckNEONFMA)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, neonfma_nr2fma, xnn_math_f32_sqrt__neonfma_nr2fma, 4, benchmark::utils::CheckNEONFMA)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, neonfma_nr3fma, xnn_math_f32_sqrt__neonfma_nr3fma, 4, benchmark::utils::CheckNEONFMA)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, neonfma_nr2fma1adj, xnn_math_f32_sqrt__neonfma_nr2fma1adj, 4, benchmark::utils::CheckNEONFMA)->Unit(benchmark::kMillisecond)->Iterations(1);
-  BENCHMARK_CAPTURE(SqrtError, neonfma_nr1rsqrts1fma1adj, xnn_math_f32_sqrt__neonfma_nr1rsqrts1fma1adj, 4, benchmark::utils::CheckNEONFMA)->Unit(benchmark::kMillisecond)->Iterations(1);
-#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
-
-#ifndef XNNPACK_BENCHMARK_NO_MAIN
-BENCHMARK_MAIN();
-#endif
diff --git a/include/xnnpack.h b/include/xnnpack.h
index 9ed6f69..72ff363 100644
--- a/include/xnnpack.h
+++ b/include/xnnpack.h
@@ -27,6 +27,11 @@
 /// Maximum number of dimensions in tensor shape.
 #define XNN_MAX_TENSOR_DIMS 6
 
+/// Allow sparse inference in a Runtime.
+///
+/// Note: this flag forces XNNPACK to consider sparse inference, but does not guarantee it.
+#define XNN_FLAG_SPARSE_INFERENCE 0x00000001
+
 /// The convolution operator represents a depthwise convolution, and uses HWGo layout for filters.
 #define XNN_FLAG_DEPTHWISE_CONVOLUTION 0x00000001
 
@@ -909,6 +914,21 @@
   uint32_t output_id,
   uint32_t flags);
 
+/// Define an ELU (Exponential Linear Unit) Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param alpha - scale factor for negative output elements.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+///                    shape must match the shape of the input tensor.
+/// @param flags - binary features of the ELU Node. No supported flags are currently defined.
+enum xnn_status xnn_define_elu(
+  xnn_subgraph_t subgraph,
+  float alpha,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
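A minimal usage sketch for the ELU Node defined above, assuming a subgraph handle and
input_id/output_id Values previously created with xnn_define_tensor_value for tensors of
identical shape (all three names are placeholders for caller-provided objects):

  // Append an ELU Node with alpha = 1.0f to an existing subgraph.
  enum xnn_status status = xnn_define_elu(
    subgraph, /*alpha=*/1.0f, input_id, output_id, /*flags=*/0);
  assert(status == xnn_status_success);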
 /// Define a Floor Node and add it to a Subgraph.
 ///
 /// @param subgraph - a Subgraph object that will own the created Node.
@@ -1019,13 +1039,13 @@
 /// Runtime is a combination of an execution plan for subgraph Nodes and a memory manager for subgraph Values.
 typedef struct xnn_runtime* xnn_runtime_t;
 
-/// Create a empty Runtime object from a subgraph.
+/// Create a Runtime object from a subgraph.
 ///
 /// @param subgraph - a Subgraph object with all Values and Nodes that would be handled by the runtime. No Values or
 ///                   Nodes can be added to the runtime once it is constructed.
 /// @param threadpool - the thread pool to be used for parallelisation of computations in the runtime. If the thread
 ///                     pool is NULL, the computation would run on the caller thread without parallelization.
-/// @param flags - binary features of the subgraph. No supported flags are currently defined.
+/// @param flags - binary features of the runtime. The only currently supported value is XNN_FLAG_SPARSE_INFERENCE.
 /// @param runtime_out - pointer to the variable that will be initialized with a handle to the Runtime object upon
 ///                      successful return. Once constructed, the Runtime object is independent of the Subgraph object
 ///                      used to create it.
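A minimal sketch of requesting sparse inference at Runtime creation, assuming the
xnn_create_runtime_v2 entry point that this comment block documents and a previously
defined subgraph; threadpool may be NULL to run on the calling thread:

  xnn_runtime_t runtime = NULL;
  // XNN_FLAG_SPARSE_INFERENCE only asks XNNPACK to consider sparse kernels;
  // the runtime may still fall back to dense execution.
  enum xnn_status status = xnn_create_runtime_v2(
    subgraph, threadpool, XNN_FLAG_SPARSE_INFERENCE, &runtime);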
@@ -1286,6 +1306,21 @@
   float* output,
   pthreadpool_t threadpool);
 
+enum xnn_status xnn_create_elu_nc_f32(
+  size_t channels,
+  size_t input_stride,
+  size_t output_stride,
+  float alpha,
+  uint32_t flags,
+  xnn_operator_t* elu_op_out);
+
+enum xnn_status xnn_setup_elu_nc_f32(
+  xnn_operator_t elu_op,
+  size_t batch_size,
+  const float* input,
+  float* output,
+  pthreadpool_t threadpool);
+
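A minimal sketch of the ELU operator API declared above, with error checking omitted;
channels, batch_size, input, output, and threadpool stand for caller-provided values,
and xnn_run_operator/xnn_delete_operator are the generic operator entry points declared
elsewhere in this header:

  xnn_operator_t elu_op = NULL;
  // Dense tensors: input and output strides equal the channel count.
  xnn_create_elu_nc_f32(
    /*channels=*/channels, /*input_stride=*/channels, /*output_stride=*/channels,
    /*alpha=*/1.0f, /*flags=*/0, &elu_op);
  xnn_setup_elu_nc_f32(elu_op, batch_size, input, output, threadpool);
  xnn_run_operator(elu_op, threadpool);
  xnn_delete_operator(elu_op);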
 enum xnn_status xnn_create_fully_connected_nc_f32(
   size_t input_channels,
   size_t output_channels,
diff --git a/scripts/generate-f32-dwconv.sh b/scripts/generate-f32-dwconv.sh
index d6b9c9e..d8ebac5 100755
--- a/scripts/generate-f32-dwconv.sh
+++ b/scripts/generate-f32-dwconv.sh
@@ -69,40 +69,40 @@
 
 ################################## WAsm SIMD ##################################
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=4 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x4-minmax-wasmsimd-arm.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=4 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-arm.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=4 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x4-minmax-wasmsimd-arm-acc2.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=4 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x4-minmax-wasmsimd-arm.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=4 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-arm.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=4 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x4-minmax-wasmsimd-arm-acc2.c
 
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=4 -D ACCUMULATORS=1 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x4-minmax-wasmsimd-x86.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=4 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-x86.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=4 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x4-minmax-wasmsimd-x86-acc2.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=4 -D ACCUMULATORS=1 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x4-minmax-wasmsimd-x86.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=4 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-x86.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=4 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x4-minmax-wasmsimd-x86-acc2.c
 
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=4 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=LINEAR -o src/f32-dwconv/gen/up4x4-wasmsimd.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=4 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=LINEAR -o src/f32-dwconv/gen/up8x4-wasmsimd.c
 
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x9-minmax-wasmsimd-arm.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-arm.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x9-minmax-wasmsimd-arm-acc2.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x9-minmax-wasmsimd-arm.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-arm.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x9-minmax-wasmsimd-arm-acc2.c
 
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D ACCUMULATORS=1 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x9-minmax-wasmsimd-x86.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-x86.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x9-minmax-wasmsimd-x86-acc2.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D ACCUMULATORS=1 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x9-minmax-wasmsimd-x86.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-x86.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x9-minmax-wasmsimd-x86-acc2.c
 
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=9 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=LINEAR -o src/f32-dwconv/gen/up4x9-wasmsimd.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=LINEAR -o src/f32-dwconv/gen/up8x9-wasmsimd.c
 
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x25-minmax-wasmsimd-arm.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-arm.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x25-minmax-wasmsimd-arm-acc2.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x25-minmax-wasmsimd-arm.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-arm.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D ACCUMULATORS=2 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x25-minmax-wasmsimd-arm-acc2.c
 
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D ACCUMULATORS=1 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x25-minmax-wasmsimd-x86.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-x86.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up4x25-minmax-wasmsimd-x86-acc2.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D ACCUMULATORS=1 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x25-minmax-wasmsimd-x86.c
-tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-x86.c
+tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D ACCUMULATORS=2 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-dwconv/gen/up8x25-minmax-wasmsimd-x86-acc2.c
 
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=4 -D KERNEL_TILE=25 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=LINEAR -o src/f32-dwconv/gen/up4x25-wasmsimd.c
 tools/xngen src/f32-dwconv/up-wasmsimd.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D ACCUMULATORS=1 -D X86=0 -D ACTIVATION=LINEAR -o src/f32-dwconv/gen/up8x25-wasmsimd.c
diff --git a/scripts/generate-f32-dwconv2d-chw.sh b/scripts/generate-f32-dwconv2d-chw.sh
index f997ae6..4fe0de4 100755
--- a/scripts/generate-f32-dwconv2d-chw.sh
+++ b/scripts/generate-f32-dwconv2d-chw.sh
@@ -213,35 +213,199 @@
 tools/xngen src/f32-dwconv2d-chw/5x5s2p2-scalar.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-scalar-3x1-acc2.c
 
 ################################## WAsm SIMD ##################################
-tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-1x4-acc3.c.in -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-arm.c
-tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-3x4.c.in        -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-arm.c
-tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-1x4-acc2.c.in -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-arm.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-4x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-5x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=6 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-6x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
 
-tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-1x4-acc3.c.in -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-x86.c
-tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-3x4.c.in        -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-x86.c
-tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-1x4-acc2.c.in -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-x86.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-4x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-5x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=6 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-6x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
 
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-3x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-4x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-5x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=6 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-6x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc2.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc3.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-4x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-5x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=6 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-6x4.c
 
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-3x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-4x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-5x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=6 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-6x4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc2.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc3.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc4.c
-tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-4x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-5x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=6 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-6x4.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-4x4.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-4x4.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-4x4.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-4x4.c
+
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-5x4.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=5 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=4 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-5x4.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=5 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in -D ROW_TILE=4 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-5x4.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=5 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc5.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=4 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=4 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=5 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-5x4.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=5 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc5.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in -D ROW_TILE=4 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=5 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=1 -D ACCUMULATORS=5 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=2 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=5 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc5.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=3 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -D X86=0 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4-acc2.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=1 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4.c
+
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=4 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc4.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=1 -D ACCUMULATORS=5 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc5.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc2.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=2 -D ACCUMULATORS=3 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc3.c
+tools/xngen src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in -D ROW_TILE=3 -D ACCUMULATORS=2 -D X86=1 -o src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4-acc2.c
 
 ################################## Unit tests #################################
 tools/generate-dwconv2d-chw-test.py --spec test/f32-dwconv2d-chw.yaml --output test/f32-dwconv2d-chw.cc
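
For context on the template parameters used in the dwconv2d-chw generator lines above: ROW_TILE appears to control how many output rows a micro-kernel produces per pass, and ACCUMULATORS how many partial sums the per-pixel tap reduction is split across. Below is a minimal plain-C sketch of the ACCUMULATORS=2 idea only; the function name is invented for illustration and this is not the code xngen generates.

/*
 * Illustrative only: a 3x3 depthwise tap sum split across two accumulators
 * so the floating-point adds form two shorter dependency chains.
 */
float dwconv3x3_tap_sum_acc2(const float i[9], const float w[9], float bias) {
  float acc0 = bias + i[0] * w[0];   /* first partial sum */
  float acc1 = i[1] * w[1];          /* second, independent partial sum */
  acc0 += i[2] * w[2];
  acc1 += i[3] * w[3];
  acc0 += i[4] * w[4];
  acc1 += i[5] * w[5];
  acc0 += i[6] * w[6];
  acc1 += i[7] * w[7];
  acc0 += i[8] * w[8];
  return acc0 + acc1;                /* combine the partial sums once at the end */
}
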
diff --git a/scripts/generate-f32-gemm.sh b/scripts/generate-f32-gemm.sh
index bff53b1..04ada2d 100755
--- a/scripts/generate-f32-gemm.sh
+++ b/scripts/generate-f32-gemm.sh
@@ -187,50 +187,50 @@
 
 ################################## WAsm SIMD ##################################
 ### LOAD1+BROADCAST micro-kernels
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-loadsplat.c
 
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-gemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-loadsplat.c
 ### LOAD4+DUPLICATE micro-kernels
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-arm.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=0 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-splat.c
 
 tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=RELU   -o src/f32-gemm/gen/1x8-relu-wasmsimd-splat.c
 tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=RELU   -o src/f32-gemm/gen/4x8-relu-wasmsimd-splat.c
@@ -240,20 +240,20 @@
 tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=LINEAR -o src/f32-gemm/gen/4x8-wasmsimd-splat.c
 tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=LINEAR -o src/f32-gemm/gen/5x8-wasmsimd-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-splat.c
 
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-x86.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=1 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-gemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=1 -D INC=1 -D ACTIVATION=MINMAX -o src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-splat.c
 
 ### LOAD4+PERMUTE micro-kernels
 tools/xngen src/f32-gemm/wasmsimd-s4.c.in -D MR=1 -D NR=8 -D X86=0 -D INC=0 -D ACTIVATION=MINMAX -o src/f32-gemm/gen/1x8s4-minmax-wasmsimd-arm.c
@@ -300,20 +300,53 @@
 tools/xngen src/f32-gemm/sse-load1.c.in -D MR=1 -D NR=8 -D INC=0 -o src/f32-gemm/gen/1x8-minmax-sse-load1.c
 tools/xngen src/f32-gemm/sse-load1.c.in -D MR=1 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/1x8inc-minmax-sse-load1.c
 
+tools/xngen src/f32-gemm/sse-load1.c.in -D MR=3 -D NR=8 -D INC=0 -o src/f32-gemm/gen/3x8-minmax-sse-load1.c
+tools/xngen src/f32-gemm/sse-load1.c.in -D MR=3 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/3x8inc-minmax-sse-load1.c
+
 tools/xngen src/f32-gemm/sse-load1.c.in -D MR=4 -D NR=8 -D INC=0 -o src/f32-gemm/gen/4x8-minmax-sse-load1.c
 tools/xngen src/f32-gemm/sse-load1.c.in -D MR=4 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/4x8inc-minmax-sse-load1.c
-### LOAD4+DUPLICATE micro-kernels
-tools/xngen src/f32-gemm/sse-dup.c.in -D MR=1 -D NR=8 -D INC=0 -o src/f32-gemm/gen/1x8-minmax-sse-dup.c
-tools/xngen src/f32-gemm/sse-dup.c.in -D MR=1 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/1x8inc-minmax-sse-dup.c
 
-tools/xngen src/f32-gemm/sse-dup.c.in -D MR=4 -D NR=8 -D INC=0 -o src/f32-gemm/gen/4x8-minmax-sse-dup.c
-tools/xngen src/f32-gemm/sse-dup.c.in -D MR=4 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/4x8inc-minmax-sse-dup.c
+tools/xngen src/f32-gemm/sse-load1.c.in -D MR=5 -D NR=8 -D INC=0 -o src/f32-gemm/gen/5x8-minmax-sse-load1.c
+tools/xngen src/f32-gemm/sse-load1.c.in -D MR=5 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/5x8inc-minmax-sse-load1.c
+
+### LOAD4+DUPLICATE micro-kernels
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=1 -D NR=8 -D INC=0 -D SSE=1 -o src/f32-gemm/gen/1x8-minmax-sse-dup.c
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=1 -D NR=8 -D INC=1 -D SSE=1 -o src/f32-gemm/gen-inc/1x8inc-minmax-sse-dup.c
+
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=3 -D NR=8 -D INC=0 -D SSE=1 -o src/f32-gemm/gen/3x8-minmax-sse-dup.c
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=3 -D NR=8 -D INC=1 -D SSE=1 -o src/f32-gemm/gen-inc/3x8inc-minmax-sse-dup.c
+
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=4 -D NR=8 -D INC=0 -D SSE=1 -o src/f32-gemm/gen/4x8-minmax-sse-dup.c
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=4 -D NR=8 -D INC=1 -D SSE=1 -o src/f32-gemm/gen-inc/4x8inc-minmax-sse-dup.c
+
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=5 -D NR=8 -D INC=0 -D SSE=1 -o src/f32-gemm/gen/5x8-minmax-sse-dup.c
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=5 -D NR=8 -D INC=1 -D SSE=1 -o src/f32-gemm/gen-inc/5x8inc-minmax-sse-dup.c
+
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=1 -D NR=8 -D INC=0 -D SSE=2 -o src/f32-gemm/gen/1x8-minmax-sse2-dup.c
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=1 -D NR=8 -D INC=1 -D SSE=2 -o src/f32-gemm/gen-inc/1x8inc-minmax-sse2-dup.c
+
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=3 -D NR=8 -D INC=0 -D SSE=2 -o src/f32-gemm/gen/3x8-minmax-sse2-dup.c
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=3 -D NR=8 -D INC=1 -D SSE=2 -o src/f32-gemm/gen-inc/3x8inc-minmax-sse2-dup.c
+
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=4 -D NR=8 -D INC=0 -D SSE=2 -o src/f32-gemm/gen/4x8-minmax-sse2-dup.c
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=4 -D NR=8 -D INC=1 -D SSE=2 -o src/f32-gemm/gen-inc/4x8inc-minmax-sse2-dup.c
+
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=5 -D NR=8 -D INC=0 -D SSE=2 -o src/f32-gemm/gen/5x8-minmax-sse2-dup.c
+tools/xngen src/f32-gemm/sse-dup.c.in -D MR=5 -D NR=8 -D INC=1 -D SSE=2 -o src/f32-gemm/gen-inc/5x8inc-minmax-sse2-dup.c
+
 ### LOAD4+PERMUTE micro-kernels
 tools/xngen src/f32-gemm/sse-shuffle.c.in -D MR=1 -D NR=8 -D INC=0 -o src/f32-gemm/gen/1x8s4-minmax-sse.c
 tools/xngen src/f32-gemm/sse-shuffle.c.in -D MR=1 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/1x8s4inc-minmax-sse.c
 
+tools/xngen src/f32-gemm/sse-shuffle.c.in -D MR=3 -D NR=8 -D INC=0 -o src/f32-gemm/gen/3x8s4-minmax-sse.c
+tools/xngen src/f32-gemm/sse-shuffle.c.in -D MR=3 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/3x8s4inc-minmax-sse.c
+
 tools/xngen src/f32-gemm/sse-shuffle.c.in -D MR=4 -D NR=8 -D INC=0 -o src/f32-gemm/gen/4x8s4-minmax-sse.c
 tools/xngen src/f32-gemm/sse-shuffle.c.in -D MR=4 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/4x8s4inc-minmax-sse.c
+
+tools/xngen src/f32-gemm/sse-shuffle.c.in -D MR=5 -D NR=8 -D INC=0 -o src/f32-gemm/gen/5x8s4-minmax-sse.c
+tools/xngen src/f32-gemm/sse-shuffle.c.in -D MR=5 -D NR=8 -D INC=1 -o src/f32-gemm/gen-inc/5x8s4inc-minmax-sse.c
+
 ### MRx2 micro-kernels
 tools/xngen src/f32-gemm/MRx2c4-sse.c.in -D MR=4 -D NR=2 -o src/f32-gemm/gen/4x2c4-minmax-sse.c
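
For readers unfamiliar with these GEMM generator parameters: MR and NR name the register tile (MR rows of A by NR columns of B accumulated per kernel call), ACTIVATION=MINMAX adds an output clamp, and the "inc" variants in gen-inc/ appear to start from existing partial results instead of the bias. The scalar sketch below shows the shape of one MR x NR minmax tile under those assumptions; names and data layout are invented, and it is a conceptual stand-in rather than the SIMD code xngen emits.

#include <stddef.h>

#define MR 4   /* rows of A per tile    */
#define NR 8   /* columns of B per tile */

/* Illustrative only: one MR x NR minmax GEMM register tile in plain C. */
void gemm_tile_minmax(size_t kc, const float* a, size_t a_stride,
                      const float* b,            /* packed: NR floats per k step */
                      float* c, size_t c_stride,
                      float vmin, float vmax) {
  float acc[MR][NR] = {{0.0f}};
  for (size_t k = 0; k < kc; k++) {
    for (size_t m = 0; m < MR; m++) {
      const float va = a[m * a_stride + k];      /* one A element reused across NR outputs */
      for (size_t n = 0; n < NR; n++) {
        acc[m][n] += va * b[k * NR + n];
      }
    }
  }
  for (size_t m = 0; m < MR; m++) {
    for (size_t n = 0; n < NR; n++) {
      float v = acc[m][n];
      v = v < vmin ? vmin : v;                   /* ACTIVATION=MINMAX clamp */
      v = v > vmax ? vmax : v;
      c[m * c_stride + n] = v;
    }
  }
}
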
 
diff --git a/scripts/generate-f32-igemm.sh b/scripts/generate-f32-igemm.sh
index 91f05eb..a9ef5c7 100755
--- a/scripts/generate-f32-igemm.sh
+++ b/scripts/generate-f32-igemm.sh
@@ -39,24 +39,24 @@
 
 ################################## WAsm SIMD ##################################
 ### LOAD1+BROADCAST micro-kernels
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c
 
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c
-tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=1 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=3 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=4 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=5 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c
+tools/xngen src/f32-igemm/wasmsimd-loadsplat.c.in -D MR=6 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c
 
 ### LOAD4+DUPLICATE micro-kernels
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-arm.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-splat.c
 
 tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=0 -D ACTIVATION=RELU   -o src/f32-igemm/gen/1x8-relu-wasmsimd-splat.c
 tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D ACTIVATION=RELU   -o src/f32-igemm/gen/4x8-relu-wasmsimd-splat.c
@@ -66,11 +66,11 @@
 tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -D ACTIVATION=LINEAR -o src/f32-igemm/gen/4x8-wasmsimd-splat.c
 tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=0 -D ACTIVATION=LINEAR -o src/f32-igemm/gen/5x8-wasmsimd-splat.c
 
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-x86.c
-tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-x86.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=1 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=3 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=5 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-splat.c
+tools/xngen src/f32-igemm/wasmsimd-splat.c.in -D MR=6 -D NR=8 -D X86=1 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-splat.c
 
 ### LOAD4+PERMUTE micro-kernels
 tools/xngen src/f32-igemm/wasmsimd-s4.c.in -D MR=1 -D NR=8 -D X86=0 -D ACTIVATION=MINMAX -o src/f32-igemm/gen/1x8s4-minmax-wasmsimd-arm.c
@@ -151,13 +151,27 @@
 ################################### x86 SSE ###################################
 ### LOAD1+BROADCAST micro-kernels
 tools/xngen src/f32-igemm/sse-load1.c.in -D MR=1 -D NR=8 -o src/f32-igemm/gen/1x8-minmax-sse-load1.c
+tools/xngen src/f32-igemm/sse-load1.c.in -D MR=3 -D NR=8 -o src/f32-igemm/gen/3x8-minmax-sse-load1.c
 tools/xngen src/f32-igemm/sse-load1.c.in -D MR=4 -D NR=8 -o src/f32-igemm/gen/4x8-minmax-sse-load1.c
+tools/xngen src/f32-igemm/sse-load1.c.in -D MR=5 -D NR=8 -o src/f32-igemm/gen/5x8-minmax-sse-load1.c
+
 ### LOAD4+DUPLICATE micro-kernels
-tools/xngen src/f32-igemm/sse-dup.c.in -D MR=1 -D NR=8 -o src/f32-igemm/gen/1x8-minmax-sse-dup.c
-tools/xngen src/f32-igemm/sse-dup.c.in -D MR=4 -D NR=8 -o src/f32-igemm/gen/4x8-minmax-sse-dup.c
+tools/xngen src/f32-igemm/sse-dup.c.in -D MR=1 -D NR=8 -D SSE=1 -o src/f32-igemm/gen/1x8-minmax-sse-dup.c
+tools/xngen src/f32-igemm/sse-dup.c.in -D MR=3 -D NR=8 -D SSE=1 -o src/f32-igemm/gen/3x8-minmax-sse-dup.c
+tools/xngen src/f32-igemm/sse-dup.c.in -D MR=4 -D NR=8 -D SSE=1 -o src/f32-igemm/gen/4x8-minmax-sse-dup.c
+tools/xngen src/f32-igemm/sse-dup.c.in -D MR=5 -D NR=8 -D SSE=1 -o src/f32-igemm/gen/5x8-minmax-sse-dup.c
+
+tools/xngen src/f32-igemm/sse-dup.c.in -D MR=1 -D NR=8 -D SSE=2 -o src/f32-igemm/gen/1x8-minmax-sse2-dup.c
+tools/xngen src/f32-igemm/sse-dup.c.in -D MR=3 -D NR=8 -D SSE=2 -o src/f32-igemm/gen/3x8-minmax-sse2-dup.c
+tools/xngen src/f32-igemm/sse-dup.c.in -D MR=4 -D NR=8 -D SSE=2 -o src/f32-igemm/gen/4x8-minmax-sse2-dup.c
+tools/xngen src/f32-igemm/sse-dup.c.in -D MR=5 -D NR=8 -D SSE=2 -o src/f32-igemm/gen/5x8-minmax-sse2-dup.c
+
 ### LOAD4+PERMUTE micro-kernels
 tools/xngen src/f32-igemm/sse-shuffle.c.in -D MR=1 -D NR=8 -o src/f32-igemm/gen/1x8s4-minmax-sse.c
+tools/xngen src/f32-igemm/sse-shuffle.c.in -D MR=3 -D NR=8 -o src/f32-igemm/gen/3x8s4-minmax-sse.c
 tools/xngen src/f32-igemm/sse-shuffle.c.in -D MR=4 -D NR=8 -o src/f32-igemm/gen/4x8s4-minmax-sse.c
+tools/xngen src/f32-igemm/sse-shuffle.c.in -D MR=5 -D NR=8 -o src/f32-igemm/gen/5x8s4-minmax-sse.c
+
 ### MRx2 micro-kernels
 tools/xngen src/f32-igemm/MRx2c4-sse.c.in -D MR=4 -D NR=2 -o src/f32-igemm/gen/4x2c4-minmax-sse.c
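
The igemm ("indirect GEMM") kernels generated above use the same MR/NR tiling as the gemm ones, but address the A operand through an indirection buffer of row pointers, which lets convolution be expressed as GEMM without an explicit im2col copy. A scalar sketch of that addressing pattern follows; the names and the single-output shape are invented for illustration.

#include <stddef.h>

/* Illustrative only: accumulate one output through an indirection buffer. */
float igemm_indirect_dot(size_t ks,              /* indirection entries (kernel taps)  */
                         size_t kc,              /* channels read per entry            */
                         const float* const* a,  /* ks pointers into the input tensor  */
                         const float* w) {       /* packed weights, ks * kc floats     */
  float acc = 0.0f;
  for (size_t p = 0; p < ks; p++) {
    const float* row = a[p];                     /* follow the pointer, no im2col copy */
    for (size_t c = 0; c < kc; c++) {
      acc += row[c] * w[p * kc + c];
    }
  }
  return acc;
}
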
 
diff --git a/scripts/generate-f32-ppmm.sh b/scripts/generate-f32-ppmm.sh
index b5f3b36..bdc2276 100755
--- a/scripts/generate-f32-ppmm.sh
+++ b/scripts/generate-f32-ppmm.sh
@@ -11,8 +11,8 @@
 tools/xngen src/f32-ppmm/scalar.c.in -D MR=3 -D NR=3 -o src/f32-ppmm/gen/3x3-minmax-scalar.c
 
 ################################## WAsm SIMD ##################################
-tools/xngen src/f32-ppmm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -o src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-arm.c
-tools/xngen src/f32-ppmm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=1 -o src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-x86.c
+tools/xngen src/f32-ppmm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=0 -o src/f32-ppmm/gen/4x8-minmax-wasmsimd-arm-splat.c
+tools/xngen src/f32-ppmm/wasmsimd-splat.c.in -D MR=4 -D NR=8 -D X86=1 -o src/f32-ppmm/gen/4x8-minmax-wasmsimd-x86-splat.c
 
 ################################### ARM NEON ##################################
 tools/xngen src/f32-ppmm/neon.c.in -D MR=4 -D NR=8 -D FMA=0 -o src/f32-ppmm/gen/4x8-minmax-neon.c
diff --git a/scripts/generate-f32-sigmoid.sh b/scripts/generate-f32-sigmoid.sh
index f45c818..eef5463 100755
--- a/scripts/generate-f32-sigmoid.sh
+++ b/scripts/generate-f32-sigmoid.sh
@@ -110,8 +110,6 @@
 tools/xngen src/f32-sigmoid/neon-lut2048-p1.c.in -D BATCH_TILE=20 -D RR_STEPS=2 -D FMA=0 -D DIV_ALGO=nr2recps -o src/f32-sigmoid/gen/neon-rr2-lut2048-p1-nr2recps-x20.c
 tools/xngen src/f32-sigmoid/neon-lut2048-p1.c.in -D BATCH_TILE=24 -D RR_STEPS=2 -D FMA=0 -D DIV_ALGO=nr2recps -o src/f32-sigmoid/gen/neon-rr2-lut2048-p1-nr2recps-x24.c
 
-tools/xngen src/f32-sigmoid/neon-frac-p9-p10-nr1recps.c.in -D BATCH_TILE=16 -o src/f32-sigmoid/gen/neon-frac-p9-p10-nr1recps-x16.c
-
 ################################### x86 SSE ###################################
 tools/xngen src/f32-sigmoid/sse-p5-div.c.in -D BATCH_TILE=4  -D SSE=2 -o src/f32-sigmoid/gen/sse2-p5-div-x4.c
 tools/xngen src/f32-sigmoid/sse-p5-div.c.in -D BATCH_TILE=8  -D SSE=2 -o src/f32-sigmoid/gen/sse2-p5-div-x8.c
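
In these element-wise generators, BATCH_TILE sets how many elements the main loop consumes per iteration, with a remainder path for the tail. The plain scalar sketch below shows that loop structure using sigmoid as the example op; it shows only the shape of the loop, not the polynomial-approximation code the templates actually generate.

#include <math.h>
#include <stddef.h>

#define BATCH_TILE 4

/* Illustrative only: BATCH_TILE-wide main loop plus scalar remainder. */
void sigmoid_f32(size_t n, const float* x, float* y) {
  size_t i = 0;
  for (; i + BATCH_TILE <= n; i += BATCH_TILE) {  /* main loop: BATCH_TILE elements per pass */
    for (size_t j = 0; j < BATCH_TILE; j++) {
      y[i + j] = 1.0f / (1.0f + expf(-x[i + j]));
    }
  }
  for (; i < n; i++) {                            /* remainder elements */
    y[i] = 1.0f / (1.0f + expf(-x[i]));
  }
}
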
diff --git a/scripts/generate-f32-spmm.sh b/scripts/generate-f32-spmm.sh
index 55824ac..a83aed3 100755
--- a/scripts/generate-f32-spmm.sh
+++ b/scripts/generate-f32-spmm.sh
@@ -20,17 +20,30 @@
 
 ################################### ARM NEON ##################################
 ### Microkernels without unrolling
+tools/xngen src/f32-spmm/neon.c.in -D MR=4  -D NR=1 -D UNROLL=1 -D FMA=0 -o src/f32-spmm/gen/4x1-minmax-neon.c
+tools/xngen src/f32-spmm/neon.c.in -D MR=8  -D NR=1 -D UNROLL=1 -D FMA=0 -o src/f32-spmm/gen/8x1-minmax-neon.c
+tools/xngen src/f32-spmm/neon.c.in -D MR=12 -D NR=1 -D UNROLL=1 -D FMA=0 -o src/f32-spmm/gen/12x1-minmax-neon.c
+tools/xngen src/f32-spmm/neon.c.in -D MR=16 -D NR=1 -D UNROLL=1 -D FMA=0 -o src/f32-spmm/gen/16x1-minmax-neon.c
+tools/xngen src/f32-spmm/neon.c.in -D MR=32 -D NR=1 -D UNROLL=1 -D FMA=0 -o src/f32-spmm/gen/32x1-minmax-neon.c
+
 tools/xngen src/f32-spmm/neon.c.in -D MR=4  -D NR=1 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/4x1-minmax-neonfma.c
 tools/xngen src/f32-spmm/neon.c.in -D MR=8  -D NR=1 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/8x1-minmax-neonfma.c
 tools/xngen src/f32-spmm/neon.c.in -D MR=12 -D NR=1 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/12x1-minmax-neonfma.c
 tools/xngen src/f32-spmm/neon.c.in -D MR=16 -D NR=1 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/16x1-minmax-neonfma.c
 tools/xngen src/f32-spmm/neon.c.in -D MR=32 -D NR=1 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/32x1-minmax-neonfma.c
+
 ### Microkernels with 2X unrolling
+tools/xngen src/f32-spmm/neon.c.in -D MR=4  -D NR=1 -D UNROLL=2 -D FMA=0 -o src/f32-spmm/gen/4x1-minmax-neon-x2.c
+tools/xngen src/f32-spmm/neon.c.in -D MR=8  -D NR=1 -D UNROLL=2 -D FMA=0 -o src/f32-spmm/gen/8x1-minmax-neon-x2.c
+tools/xngen src/f32-spmm/neon.c.in -D MR=16 -D NR=1 -D UNROLL=2 -D FMA=0 -o src/f32-spmm/gen/16x1-minmax-neon-x2.c
+tools/xngen src/f32-spmm/neon.c.in -D MR=32 -D NR=1 -D UNROLL=2 -D FMA=0 -o src/f32-spmm/gen/32x1-minmax-neon-x2.c
+
 tools/xngen src/f32-spmm/neon.c.in -D MR=4  -D NR=1 -D UNROLL=2 -D FMA=1 -o src/f32-spmm/gen/4x1-minmax-neonfma-x2.c
 tools/xngen src/f32-spmm/neon.c.in -D MR=8  -D NR=1 -D UNROLL=2 -D FMA=1 -o src/f32-spmm/gen/8x1-minmax-neonfma-x2.c
 tools/xngen src/f32-spmm/neon.c.in -D MR=16 -D NR=1 -D UNROLL=2 -D FMA=1 -o src/f32-spmm/gen/16x1-minmax-neonfma-x2.c
 tools/xngen src/f32-spmm/neon.c.in -D MR=32 -D NR=1 -D UNROLL=2 -D FMA=1 -o src/f32-spmm/gen/32x1-minmax-neonfma-x2.c
-### Microkernels for blocks of several output channels
+
+### Microkernels for blocks of output channels
 tools/xngen src/f32-spmm/neon-blocked.c.in -D MR=4  -D NR=2 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/4x2-minmax-neonfma.c
 tools/xngen src/f32-spmm/neon-blocked.c.in -D MR=8  -D NR=2 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/8x2-minmax-neonfma.c
 tools/xngen src/f32-spmm/neon-blocked.c.in -D MR=12 -D NR=2 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/12x2-minmax-neonfma.c
@@ -41,7 +54,13 @@
 tools/xngen src/f32-spmm/neon-blocked.c.in -D MR=12 -D NR=4 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/12x4-minmax-neonfma.c
 tools/xngen src/f32-spmm/neon-blocked.c.in -D MR=16 -D NR=4 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/16x4-minmax-neonfma.c
 tools/xngen src/f32-spmm/neon-blocked.c.in -D MR=32 -D NR=4 -D UNROLL=1 -D FMA=1 -o src/f32-spmm/gen/32x4-minmax-neonfma.c
+
 ### Microkernels with software pipelining
+tools/xngen src/f32-spmm/neon-pipelined.c.in -D MR=4  -D NR=1 -D FMA=0 -o src/f32-spmm/gen/4x1-minmax-neon-pipelined.c
+tools/xngen src/f32-spmm/neon-pipelined.c.in -D MR=8  -D NR=1 -D FMA=0 -o src/f32-spmm/gen/8x1-minmax-neon-pipelined.c
+tools/xngen src/f32-spmm/neon-pipelined.c.in -D MR=16 -D NR=1 -D FMA=0 -o src/f32-spmm/gen/16x1-minmax-neon-pipelined.c
+tools/xngen src/f32-spmm/neon-pipelined.c.in -D MR=32 -D NR=1 -D FMA=0 -o src/f32-spmm/gen/32x1-minmax-neon-pipelined.c
+
 tools/xngen src/f32-spmm/neon-pipelined.c.in -D MR=4  -D NR=1 -D FMA=1 -o src/f32-spmm/gen/4x1-minmax-neonfma-pipelined.c
 tools/xngen src/f32-spmm/neon-pipelined.c.in -D MR=8  -D NR=1 -D FMA=1 -o src/f32-spmm/gen/8x1-minmax-neonfma-pipelined.c
 tools/xngen src/f32-spmm/neon-pipelined.c.in -D MR=16 -D NR=1 -D FMA=1 -o src/f32-spmm/gen/16x1-minmax-neonfma-pipelined.c
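
For the SpMM kernels added above, MR is the number of dense output values kept live while the nonzero weights of one sparse row are streamed, UNROLL=2 consumes two nonzeros per loop iteration, and FMA=0/1 selects plain multiply-add versus fused multiply-add (the new FMA=0 lines presumably cover NEON cores without fused multiply-add). The sketch below uses a generic index array rather than XNNPACK's actual sparse encoding, and its names are invented for illustration.

#include <stddef.h>

#define MR 4   /* dense output values processed together */

/* Illustrative only: one output channel of a sparse-times-dense product. */
void spmm_one_channel(size_t nnz,
                      const float* w,      /* nnz nonzero weights              */
                      const size_t* idx,   /* nnz input-channel indices        */
                      const float* in,     /* input laid out MR values/channel */
                      float* out) {        /* MR outputs for this channel      */
  float acc[MR] = {0.0f};
  for (size_t k = 0; k < nnz; k++) {
    const float* i = &in[idx[k] * MR];
    for (size_t m = 0; m < MR; m++) {
      acc[m] += i[m] * w[k];               /* vmla (FMA=0) or vfma (FMA=1) on NEON */
    }
  }
  for (size_t m = 0; m < MR; m++) {
    out[m] = acc[m];
  }
}
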
diff --git a/scripts/generate-f32-velu.sh b/scripts/generate-f32-velu.sh
new file mode 100755
index 0000000..6a5f61e
--- /dev/null
+++ b/scripts/generate-f32-velu.sh
@@ -0,0 +1,210 @@
+#!/bin/sh
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+#################################### Scalar ###################################
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=1 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x1.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=2 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x2.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=3 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x3.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=4 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x4.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=5 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x5.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=6 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x6.c
+
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=1 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-p6-x1.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=2 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-p6-x2.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=3 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-p6-x3.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=4 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-p6-x4.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=5 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-p6-x5.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=6 -D WASM=0 -o src/f32-velu/gen/velu-scalar-rr2-p6-x6.c
+
+##################################### WAsm ####################################
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=1 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x1.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=2 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x2.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=3 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x3.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=4 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x4.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=5 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x5.c
+tools/xngen src/f32-velu/scalar-rr2-lut16-p3.c.in -D BATCH_TILE=6 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x6.c
+
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=1 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-p6-x1.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=2 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-p6-x2.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=3 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-p6-x3.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=4 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-p6-x4.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=5 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-p6-x5.c
+tools/xngen src/f32-velu/scalar-rr2-p6.c.in -D BATCH_TILE=6 -D WASM=1 -o src/f32-velu/gen/velu-wasm-rr2-p6-x6.c
+
+################################## WAsm SIMD ##################################
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=4  -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x4.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=8  -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x8.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=12 -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x12.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=16 -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x16.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=20 -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x20.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=24 -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x24.c
+
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=4  -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x4.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=8  -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x8.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=12 -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x12.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=16 -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x16.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=20 -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x20.c
+tools/xngen src/f32-velu/wasmsimd-rr2-lut16-p3.c.in -D BATCH_TILE=24 -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x24.c
+
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=4  -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x4.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=8  -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x8.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=12 -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x12.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=16 -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x16.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=20 -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x20.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=24 -D X86=0 -o src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x24.c
+
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=4  -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x4.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=8  -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x8.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=12 -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x12.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=16 -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x16.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=20 -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x20.c
+tools/xngen src/f32-velu/wasmsimd-rr2-p6.c.in -D BATCH_TILE=24 -D X86=1 -o src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x24.c
+
+################################### ARM NEON ##################################
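+# FMA=0 emits plain NEON kernels (velu-neon-rr2-*), FMA=1 emits NEON+FMA kernels (velu-neonfma-rr1-*).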
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=4  -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-lut16-p3-x4.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=8  -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-lut16-p3-x8.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=12 -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-lut16-p3-x12.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=16 -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-lut16-p3-x16.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=20 -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-lut16-p3-x20.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=24 -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-lut16-p3-x24.c
+
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=4  -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-p6-x4.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=8  -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-p6-x8.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=12 -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-p6-x12.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=16 -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-p6-x16.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=20 -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-p6-x20.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=24 -D FMA=0 -o src/f32-velu/gen/velu-neon-rr2-p6-x24.c
+
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=4  -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x4.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=8  -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x8.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=12 -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x12.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=16 -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x16.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=20 -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x20.c
+tools/xngen src/f32-velu/neon-lut16-p3.c.in -D BATCH_TILE=24 -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x24.c
+
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=4  -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-p6-x4.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=8  -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-p6-x8.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=12 -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-p6-x12.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=16 -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-p6-x16.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=20 -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-p6-x20.c
+tools/xngen src/f32-velu/neon-p6.c.in -D BATCH_TILE=24 -D FMA=1 -o src/f32-velu/gen/velu-neonfma-rr1-p6-x24.c
+
+################################# x86 128-bit #################################
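+# SSE=2 targets SSE2 (velu-sse2-*), SSE=4 targets SSE4.1 (velu-sse41-*).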
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=4  -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x4.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=8  -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x8.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=12 -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x12.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=16 -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x16.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=20 -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x20.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=24 -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x24.c
+
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=4  -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-p6-x4.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=8  -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-p6-x8.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=12 -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-p6-x12.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=16 -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-p6-x16.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=20 -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-p6-x20.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=24 -D SSE=2 -o src/f32-velu/gen/velu-sse2-rr2-p6-x24.c
+
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=4  -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=8  -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x8.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=12 -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x12.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=16 -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x16.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=20 -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x20.c
+tools/xngen src/f32-velu/sse-rr2-lut16-p3.c.in -D BATCH_TILE=24 -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x24.c
+
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=4  -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-p6-x4.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=8  -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-p6-x8.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=12 -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-p6-x12.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=16 -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-p6-x16.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=20 -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-p6-x20.c
+tools/xngen src/f32-velu/sse-rr2-p6.c.in -D BATCH_TILE=24 -D SSE=4 -o src/f32-velu/gen/velu-sse41-rr2-p6-x24.c
+
+################################# x86 256-bit #################################
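+# AVX kernels (rr2) and AVX2 kernels (rr1); batch tiles are multiples of 8 floats, i.e. one 256-bit register.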
+tools/xngen src/f32-velu/avx-rr2-lut4-p4-perm.c.in -D BATCH_TILE=8  -o src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x8.c
+tools/xngen src/f32-velu/avx-rr2-lut4-p4-perm.c.in -D BATCH_TILE=16 -o src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x16.c
+tools/xngen src/f32-velu/avx-rr2-lut4-p4-perm.c.in -D BATCH_TILE=24 -o src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x24.c
+tools/xngen src/f32-velu/avx-rr2-lut4-p4-perm.c.in -D BATCH_TILE=32 -o src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x32.c
+tools/xngen src/f32-velu/avx-rr2-lut4-p4-perm.c.in -D BATCH_TILE=40 -o src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x40.c
+tools/xngen src/f32-velu/avx-rr2-lut4-p4-perm.c.in -D BATCH_TILE=48 -o src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x48.c
+
+tools/xngen src/f32-velu/avx-rr2-lut16-p3.c.in -D BATCH_TILE=8  -o src/f32-velu/gen/velu-avx-rr2-lut16-p3-x8.c
+tools/xngen src/f32-velu/avx-rr2-lut16-p3.c.in -D BATCH_TILE=16 -o src/f32-velu/gen/velu-avx-rr2-lut16-p3-x16.c
+tools/xngen src/f32-velu/avx-rr2-lut16-p3.c.in -D BATCH_TILE=24 -o src/f32-velu/gen/velu-avx-rr2-lut16-p3-x24.c
+tools/xngen src/f32-velu/avx-rr2-lut16-p3.c.in -D BATCH_TILE=32 -o src/f32-velu/gen/velu-avx-rr2-lut16-p3-x32.c
+tools/xngen src/f32-velu/avx-rr2-lut16-p3.c.in -D BATCH_TILE=40 -o src/f32-velu/gen/velu-avx-rr2-lut16-p3-x40.c
+tools/xngen src/f32-velu/avx-rr2-lut16-p3.c.in -D BATCH_TILE=48 -o src/f32-velu/gen/velu-avx-rr2-lut16-p3-x48.c
+
+tools/xngen src/f32-velu/avx-rr2-p6.c.in -D BATCH_TILE=8  -o src/f32-velu/gen/velu-avx-rr2-p6-x8.c
+tools/xngen src/f32-velu/avx-rr2-p6.c.in -D BATCH_TILE=16 -o src/f32-velu/gen/velu-avx-rr2-p6-x16.c
+tools/xngen src/f32-velu/avx-rr2-p6.c.in -D BATCH_TILE=24 -o src/f32-velu/gen/velu-avx-rr2-p6-x24.c
+tools/xngen src/f32-velu/avx-rr2-p6.c.in -D BATCH_TILE=32 -o src/f32-velu/gen/velu-avx-rr2-p6-x32.c
+tools/xngen src/f32-velu/avx-rr2-p6.c.in -D BATCH_TILE=40 -o src/f32-velu/gen/velu-avx-rr2-p6-x40.c
+tools/xngen src/f32-velu/avx-rr2-p6.c.in -D BATCH_TILE=48 -o src/f32-velu/gen/velu-avx-rr2-p6-x48.c
+
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=8  -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x8.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=16 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x16.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=24 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x24.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=32 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x32.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=40 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x40.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=48 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x48.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=56 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x56.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=64 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x64.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=72 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x72.c
+tools/xngen src/f32-velu/avx2-rr1-lut4-p4-perm.c.in -D BATCH_TILE=80 -o src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x80.c
+
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=8  -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x8.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=16 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x16.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=24 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x24.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=32 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x32.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=40 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x40.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=48 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x48.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=56 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x56.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=64 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x64.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=72 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x72.c
+tools/xngen src/f32-velu/avx2-rr1-lut8-p4-perm.c.in -D BATCH_TILE=80 -o src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x80.c
+
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=8  -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x8.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=16 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x16.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=24 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x24.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=32 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x32.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=40 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x40.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=48 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x48.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=56 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x56.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=64 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x64.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=72 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x72.c
+tools/xngen src/f32-velu/avx2-rr1-lut16-p3-gather.c.in -D BATCH_TILE=80 -o src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x80.c
+
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=8  -o src/f32-velu/gen/velu-avx2-rr1-p6-x8.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=16 -o src/f32-velu/gen/velu-avx2-rr1-p6-x16.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=24 -o src/f32-velu/gen/velu-avx2-rr1-p6-x24.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=32 -o src/f32-velu/gen/velu-avx2-rr1-p6-x32.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=40 -o src/f32-velu/gen/velu-avx2-rr1-p6-x40.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=48 -o src/f32-velu/gen/velu-avx2-rr1-p6-x48.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=56 -o src/f32-velu/gen/velu-avx2-rr1-p6-x56.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=64 -o src/f32-velu/gen/velu-avx2-rr1-p6-x64.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=72 -o src/f32-velu/gen/velu-avx2-rr1-p6-x72.c
+tools/xngen src/f32-velu/avx2-rr1-p6.c.in -D BATCH_TILE=80 -o src/f32-velu/gen/velu-avx2-rr1-p6-x80.c
+
+################################# x86 512-bit #################################
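+# AVX512F kernels; batch tiles are multiples of 16 floats, i.e. one 512-bit register.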
+tools/xngen src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in -D BATCH_TILE=16  -o src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x16.c
+tools/xngen src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in -D BATCH_TILE=32  -o src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x32.c
+tools/xngen src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in -D BATCH_TILE=48  -o src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x48.c
+tools/xngen src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in -D BATCH_TILE=64  -o src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x64.c
+tools/xngen src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in -D BATCH_TILE=80  -o src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x80.c
+tools/xngen src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in -D BATCH_TILE=96  -o src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x96.c
+tools/xngen src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in -D BATCH_TILE=112 -o src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x112.c
+tools/xngen src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in -D BATCH_TILE=128 -o src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x128.c
+
+tools/xngen src/f32-velu/avx512f-rr1-p6.c.in -D BATCH_TILE=16  -o src/f32-velu/gen/velu-avx512f-rr1-p6-x16.c
+tools/xngen src/f32-velu/avx512f-rr1-p6.c.in -D BATCH_TILE=32  -o src/f32-velu/gen/velu-avx512f-rr1-p6-x32.c
+tools/xngen src/f32-velu/avx512f-rr1-p6.c.in -D BATCH_TILE=48  -o src/f32-velu/gen/velu-avx512f-rr1-p6-x48.c
+tools/xngen src/f32-velu/avx512f-rr1-p6.c.in -D BATCH_TILE=64  -o src/f32-velu/gen/velu-avx512f-rr1-p6-x64.c
+tools/xngen src/f32-velu/avx512f-rr1-p6.c.in -D BATCH_TILE=80  -o src/f32-velu/gen/velu-avx512f-rr1-p6-x80.c
+tools/xngen src/f32-velu/avx512f-rr1-p6.c.in -D BATCH_TILE=96  -o src/f32-velu/gen/velu-avx512f-rr1-p6-x96.c
+tools/xngen src/f32-velu/avx512f-rr1-p6.c.in -D BATCH_TILE=112 -o src/f32-velu/gen/velu-avx512f-rr1-p6-x112.c
+tools/xngen src/f32-velu/avx512f-rr1-p6.c.in -D BATCH_TILE=128 -o src/f32-velu/gen/velu-avx512f-rr1-p6-x128.c
+
+################################## Unit tests #################################
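+# Regenerate the f32-velu unit tests from the YAML spec.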
+tools/generate-vunary-test.py --spec test/f32-velu.yaml --output test/f32-velu.cc
diff --git a/src/f32-conv-hwc2chw/3x3s2p1c3x4-neon-2x2.c b/src/f32-conv-hwc2chw/3x3s2p1c3x4-neon-2x2.c
new file mode 100644
index 0000000..ab8684d
--- /dev/null
+++ b/src/f32-conv-hwc2chw/3x3s2p1c3x4-neon-2x2.c
@@ -0,0 +1,650 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/conv.h>
+#include <xnnpack/math.h>
+
+
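+// 3x3 stride-2 convolution with top/left padding of 1: reads HWC input with 3 channels
+// and writes CHW output. Each inner-loop iteration produces a 2x2 block of output pixels
+// (2 rows x 2 columns) for a group of 4 output channels.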
+void xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2(
+    size_t input_height,
+    size_t input_width,
+    size_t output_y_start,
+    size_t output_y_end,
+    const float* input,
+    const float* zero,
+    const float* weights,
+    float* output,
+    size_t input_padding_top,
+    size_t output_channels,
+    size_t output_height_stride,
+    size_t output_channel_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_width != 0);
+  assert(output_y_end > output_y_start);
+  assert(input_padding_top <= 1);
+  assert(output_channels != 0);
+
+  const size_t input_height_stride = input_width * 3 /* channels */ * sizeof(float);
+  const size_t input_width_increment = round_down_po2(input_width, 4) * 3 /* channels */ * sizeof(float);
+  const size_t output_width = (input_width + 1) / 2;
+  const size_t output_channel_increment = output_channel_stride * 4 - output_width * sizeof(float);
+
+  // Adjust the input pointers for the top padding; padded rows are substituted with the zero row below.
+  const float* i0 = (const float*) ((uintptr_t) input + input_height_stride * (output_y_start * 2 - input_padding_top));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_height_stride);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_height_stride);
+  float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);
+  float* output1 = (float*) ((uintptr_t) output0 + output_height_stride);
+
+  if XNN_UNPREDICTABLE(output_y_start < input_padding_top) {
+    i0 = zero;
+  }
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+
+  for (size_t output_y = output_y_start; output_y < output_y_end; output_y += 2) {
+    const size_t input_y2 = output_y * 2 + 2 - input_padding_top;
+    const size_t input_y4 = input_y2 + 2;
+    if XNN_UNPREDICTABLE(input_y2 >= input_height) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(input_y4 > input_height) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(input_y4 >= input_height) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_y + 2 > output_y_end) {
+      output1 = output0;
+    }
+
+    const float* w = weights;
+    size_t c = output_channels;
+    float* o0c0 = output0;
+    float* o1c0 = output1;
+    float* o0c1 = (float*) ((uintptr_t) o0c0 + output_channel_stride);
+    float* o1c1 = (float*) ((uintptr_t) o1c0 + output_channel_stride);
+    float* o0c2 = (float*) ((uintptr_t) o0c1 + output_channel_stride);
+    float* o1c2 = (float*) ((uintptr_t) o1c1 + output_channel_stride);
+    float* o0c3 = (float*) ((uintptr_t) o0c2 + output_channel_stride);
+    float* o1c3 = (float*) ((uintptr_t) o1c2 + output_channel_stride);
+    do {
+      if XNN_UNPREDICTABLE(c < 2) {
+        o0c1 = o0c0;
+        o1c1 = o1c0;
+      }
+      if XNN_UNPREDICTABLE(c <= 2) {
+        o0c2 = o0c1;
+        o1c2 = o1c1;
+      }
+      if XNN_UNPREDICTABLE(c < 4) {
+        o0c3 = o0c2;
+        o1c3 = o1c2;
+      }
+
+      // viMx0 = ( iM0c2, iM0c1, iM0c0, --- )
+      float32x4_t vi0x0 = vmovq_n_f32(0.0f);
+      float32x4_t vi1x0 = vmovq_n_f32(0.0f);
+      float32x4_t vi2x0 = vmovq_n_f32(0.0f);
+      float32x4_t vi3x0 = vmovq_n_f32(0.0f);
+      float32x4_t vi4x0 = vmovq_n_f32(0.0f);
+
+      size_t iw = input_width;
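+      // Main loop: each iteration consumes 4 input pixels (12 floats per row, HWC) and
+      // writes 2 output pixels per output row for each of the 4 output channels.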
+      for (; iw >= 4; iw -= 4) {
+        float32x4_t vo0x0 = vld1q_f32(w);
+        float32x4_t vo1x0 = vo0x0;
+        float32x4_t vo0x1 = vo0x0;
+        float32x4_t vo1x1 = vo0x0;
+
+        const float32x4_t vk00c0 = vld1q_f32(w + 4);
+
+        // viMx1 = ( iM2c0, iM1c2, iM1c1, iM1c0 )
+        const float32x4_t vi0x1 = vld1q_f32(i0); i0 += 4;
+        const float32x4_t vi1x1 = vld1q_f32(i1); i1 += 4;
+        const float32x4_t vi2x1 = vld1q_f32(i2); i2 += 4;
+        const float32x4_t vi3x1 = vld1q_f32(i3); i3 += 4;
+        const float32x4_t vi4x1 = vld1q_f32(i4); i4 += 4;
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk00c0, vget_low_f32(vi0x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk00c0, vget_low_f32(vi2x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk00c0, vget_high_f32(vi0x1), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk00c0, vget_high_f32(vi2x1), 1);
+
+        const float32x4_t vk10c0 = vld1q_f32(w + 8);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk10c0, vget_low_f32(vi1x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk10c0, vget_low_f32(vi3x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk10c0, vget_high_f32(vi1x1), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk10c0, vget_high_f32(vi3x1), 1);
+
+        const float32x4_t vk20c0 = vld1q_f32(w + 12);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk20c0, vget_low_f32(vi2x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk20c0, vget_low_f32(vi4x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk20c0, vget_high_f32(vi2x1), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk20c0, vget_high_f32(vi4x1), 1);
+
+        const float32x4_t vk00c1 = vld1q_f32(w + 16);
+
+        // viMx2 = ( iM3c1, iM3c0, iM2c2, iM2c1 )
+        const float32x4_t vi0x2 = vld1q_f32(i0); i0 += 4;
+        const float32x4_t vi1x2 = vld1q_f32(i1); i1 += 4;
+        const float32x4_t vi2x2 = vld1q_f32(i2); i2 += 4;
+        const float32x4_t vi3x2 = vld1q_f32(i3); i3 += 4;
+        const float32x4_t vi4x2 = vld1q_f32(i4); i4 += 4;
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk00c1, vget_high_f32(vi0x0), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk00c1, vget_high_f32(vi2x0), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk00c1, vget_low_f32(vi0x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk00c1, vget_low_f32(vi2x2), 0);
+
+        const float32x4_t vk10c1 = vld1q_f32(w + 20);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk10c1, vget_high_f32(vi1x0), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk10c1, vget_high_f32(vi3x0), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk10c1, vget_low_f32(vi1x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk10c1, vget_low_f32(vi3x2), 0);
+
+        const float32x4_t vk20c1 = vld1q_f32(w + 24);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk20c1, vget_high_f32(vi2x0), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk20c1, vget_high_f32(vi4x0), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk20c1, vget_low_f32(vi2x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk20c1, vget_low_f32(vi4x2), 0);
+
+        const float32x4_t vk00c2 = vld1q_f32(w + 28);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk00c2, vget_high_f32(vi0x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk00c2, vget_high_f32(vi2x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk00c2, vget_low_f32(vi0x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk00c2, vget_low_f32(vi2x2), 1);
+
+        const float32x4_t vk10c2 = vld1q_f32(w + 32);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk10c2, vget_high_f32(vi1x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk10c2, vget_high_f32(vi3x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk10c2, vget_low_f32(vi1x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk10c2, vget_low_f32(vi3x2), 1);
+
+        const float32x4_t vk20c2 = vld1q_f32(w + 36);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk20c2, vget_high_f32(vi2x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk20c2, vget_high_f32(vi4x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk20c2, vget_low_f32(vi2x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk20c2, vget_low_f32(vi4x2), 1);
+
+        const float32x4_t vk01c0 = vld1q_f32(w + 40);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk01c0, vget_low_f32(vi0x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk01c0, vget_low_f32(vi2x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk01c0, vget_high_f32(vi0x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk01c0, vget_high_f32(vi2x2), 0);
+
+        const float32x4_t vk11c0 = vld1q_f32(w + 44);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk11c0, vget_low_f32(vi1x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk11c0, vget_low_f32(vi3x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk11c0, vget_high_f32(vi1x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk11c0, vget_high_f32(vi3x2), 0);
+
+        const float32x4_t vk21c0 = vld1q_f32(w + 48);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk21c0, vget_low_f32(vi2x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk21c0, vget_low_f32(vi4x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk21c0, vget_high_f32(vi2x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk21c0, vget_high_f32(vi4x2), 0);
+
+        const float32x4_t vk01c1 = vld1q_f32(w + 52);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk01c1, vget_low_f32(vi0x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk01c1, vget_low_f32(vi2x1), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk01c1, vget_high_f32(vi0x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk01c1, vget_high_f32(vi2x2), 1);
+
+        const float32x4_t vk11c1 = vld1q_f32(w + 56);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk11c1, vget_low_f32(vi1x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk11c1, vget_low_f32(vi3x1), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk11c1, vget_high_f32(vi1x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk11c1, vget_high_f32(vi3x2), 1);
+
+        const float32x4_t vk21c1 = vld1q_f32(w + 60);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk21c1, vget_low_f32(vi2x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk21c1, vget_low_f32(vi4x1), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk21c1, vget_high_f32(vi2x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk21c1, vget_high_f32(vi4x2), 1);
+
+        const float32x4_t vk01c2 = vld1q_f32(w + 64);
+
+        // viMx3 = ( iM4c2, iM4c1, iM4c0, iM3c2 )
+        const float32x4_t vi0x3 = vld1q_f32(i0); i0 += 4;
+        const float32x4_t vi1x3 = vld1q_f32(i1); i1 += 4;
+        const float32x4_t vi2x3 = vld1q_f32(i2); i2 += 4;
+        const float32x4_t vi3x3 = vld1q_f32(i3); i3 += 4;
+        const float32x4_t vi4x3 = vld1q_f32(i4); i4 += 4;
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk01c2, vget_high_f32(vi0x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk01c2, vget_high_f32(vi2x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk01c2, vget_low_f32(vi0x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk01c2, vget_low_f32(vi2x3), 0);
+
+        const float32x4_t vk11c2 = vld1q_f32(w + 68);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk11c2, vget_high_f32(vi1x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk11c2, vget_high_f32(vi3x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk11c2, vget_low_f32(vi1x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk11c2, vget_low_f32(vi3x3), 0);
+
+        const float32x4_t vk21c2 = vld1q_f32(w + 72);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk21c2, vget_high_f32(vi2x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk21c2, vget_high_f32(vi4x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk21c2, vget_low_f32(vi2x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk21c2, vget_low_f32(vi4x3), 0);
+
+        const float32x4_t vk02c0 = vld1q_f32(w + 76);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk02c0, vget_high_f32(vi0x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk02c0, vget_high_f32(vi2x1), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk02c0, vget_low_f32(vi0x3), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk02c0, vget_low_f32(vi2x3), 1);
+
+        const float32x4_t vk12c0 = vld1q_f32(w + 80);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk12c0, vget_high_f32(vi1x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk12c0, vget_high_f32(vi3x1), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk12c0, vget_low_f32(vi1x3), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk12c0, vget_low_f32(vi3x3), 1);
+
+        const float32x4_t vk22c0 = vld1q_f32(w + 84);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk22c0, vget_high_f32(vi2x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk22c0, vget_high_f32(vi4x1), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk22c0, vget_low_f32(vi2x3), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk22c0, vget_low_f32(vi4x3), 1);
+
+        const float32x4_t vk02c1 = vld1q_f32(w + 88);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk02c1, vget_low_f32(vi0x2), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk02c1, vget_low_f32(vi2x2), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk02c1, vget_high_f32(vi0x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk02c1, vget_high_f32(vi2x3), 0);
+
+        const float32x4_t vk12c1 = vld1q_f32(w + 92);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk12c1, vget_low_f32(vi1x2), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk12c1, vget_low_f32(vi3x2), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk12c1, vget_high_f32(vi1x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk12c1, vget_high_f32(vi3x3), 0);
+
+        const float32x4_t vk22c1 = vld1q_f32(w + 96);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk22c1, vget_low_f32(vi2x2), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk22c1, vget_low_f32(vi4x2), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk22c1, vget_high_f32(vi2x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk22c1, vget_high_f32(vi4x3), 0);
+
+        const float32x4_t vk02c2 = vld1q_f32(w + 100);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk02c2, vget_low_f32(vi0x2), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk02c2, vget_low_f32(vi2x2), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk02c2, vget_high_f32(vi0x3), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk02c2, vget_high_f32(vi2x3), 1);
+
+        const float32x4_t vk12c2 = vld1q_f32(w + 104);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk12c2, vget_low_f32(vi1x2), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk12c2, vget_low_f32(vi3x2), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk12c2, vget_high_f32(vi1x3), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk12c2, vget_high_f32(vi3x3), 1);
+
+        const float32x4_t vk22c2 = vld1q_f32(w + 108);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk22c2, vget_low_f32(vi2x2), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk22c2, vget_low_f32(vi4x2), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk22c2, vget_high_f32(vi2x3), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk22c2, vget_high_f32(vi4x3), 1);
+
+        vi0x0 = vi0x3;
+        vi1x0 = vi1x3;
+        vi2x0 = vi2x3;
+        vi3x0 = vi3x3;
+        vi4x0 = vi4x3;
+
+        vo0x0 = vmaxq_f32(vo0x0, vmin);
+        vo1x0 = vmaxq_f32(vo1x0, vmin);
+        vo0x1 = vmaxq_f32(vo0x1, vmin);
+        vo1x1 = vmaxq_f32(vo1x1, vmin);
+
+        vo0x0 = vminq_f32(vo0x0, vmax);
+        vo1x0 = vminq_f32(vo1x0, vmax);
+        vo0x1 = vminq_f32(vo0x1, vmax);
+        vo1x1 = vminq_f32(vo1x1, vmax);
+
+        const float32x4x2_t vo0c0123 = vzipq_f32(vo0x0, vo0x1);
+        const float32x4x2_t vo1c0123 = vzipq_f32(vo1x0, vo1x1);
+
+        // Always 2+ output width elements remaining
+        vst1_f32(o1c0, vget_low_f32(vo1c0123.val[0])); o1c0 += 2;
+        vst1_f32(o1c1, vget_high_f32(vo1c0123.val[0])); o1c1 += 2;
+        vst1_f32(o1c2, vget_low_f32(vo1c0123.val[1])); o1c2 += 2;
+        vst1_f32(o1c3, vget_high_f32(vo1c0123.val[1])); o1c3 += 2;
+
+        vst1_f32(o0c0, vget_low_f32(vo0c0123.val[0])); o0c0 += 2;
+        vst1_f32(o0c1, vget_high_f32(vo0c0123.val[0])); o0c1 += 2;
+        vst1_f32(o0c2, vget_low_f32(vo0c0123.val[1])); o0c2 += 2;
+        vst1_f32(o0c3, vget_high_f32(vo0c0123.val[1])); o0c3 += 2;
+      }
+      assert(iw < 4);
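+      // Right-edge remainder of 1-3 input pixels: 2 output pixels when iw == 3, 1 otherwise.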
+      if XNN_UNLIKELY(iw != 0) {
+        float32x4_t vo0x0 = vld1q_f32(w);
+        float32x4_t vo1x0 = vo0x0;
+        float32x4_t vo0x1 = vo0x0;
+        float32x4_t vo1x1 = vo0x0;
+
+        const float32x4_t vk00c0 = vld1q_f32(w + 4);
+
+        // viMx1 = ( iM2c0, iM1c2, iM1c1, iM1c0 )
+        float32x4_t vi0x1 = vld1q_f32(i0);
+        float32x4_t vi1x1 = vld1q_f32(i1);
+        float32x4_t vi2x1 = vld1q_f32(i2);
+        float32x4_t vi3x1 = vld1q_f32(i3);
+        float32x4_t vi4x1 = vld1q_f32(i4);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk00c0, vget_low_f32(vi0x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk00c0, vget_low_f32(vi2x0), 1);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk00c0, vget_high_f32(vi0x1), 1);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk00c0, vget_high_f32(vi2x1), 1);
+        }
+
+        const float32x4_t vk10c0 = vld1q_f32(w + 8);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk10c0, vget_low_f32(vi1x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk10c0, vget_low_f32(vi3x0), 1);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk10c0, vget_high_f32(vi1x1), 1);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk10c0, vget_high_f32(vi3x1), 1);
+        }
+
+        const float32x4_t vk20c0 = vld1q_f32(w + 12);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk20c0, vget_low_f32(vi2x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk20c0, vget_low_f32(vi4x0), 1);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk20c0, vget_high_f32(vi2x1), 1);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk20c0, vget_high_f32(vi4x1), 1);
+        }
+
+        const float32x4_t vk00c1 = vld1q_f32(w + 16);
+
+        float32x4_t vi0x2 = vmovq_n_f32(0.0f);
+        float32x4_t vi1x2 = vmovq_n_f32(0.0f);
+        float32x4_t vi2x2 = vmovq_n_f32(0.0f);
+        float32x4_t vi3x2 = vmovq_n_f32(0.0f);
+        float32x4_t vi4x2 = vmovq_n_f32(0.0f);
+        if (iw >= 2) {
+          // viMx2 = ( iM3c1, iM3c0, iM2c2, iM2c1 )
+          vi0x2 = vld1q_f32(i0 + 4);
+          vi1x2 = vld1q_f32(i1 + 4);
+          vi2x2 = vld1q_f32(i2 + 4);
+          vi3x2 = vld1q_f32(i3 + 4);
+          vi4x2 = vld1q_f32(i4 + 4);
+        }
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk00c1, vget_high_f32(vi0x0), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk00c1, vget_high_f32(vi2x0), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk00c1, vget_low_f32(vi0x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk00c1, vget_low_f32(vi2x2), 0);
+
+        const float32x4_t vk10c1 = vld1q_f32(w + 20);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk10c1, vget_high_f32(vi1x0), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk10c1, vget_high_f32(vi3x0), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk10c1, vget_low_f32(vi1x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk10c1, vget_low_f32(vi3x2), 0);
+
+        const float32x4_t vk20c1 = vld1q_f32(w + 24);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk20c1, vget_high_f32(vi2x0), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk20c1, vget_high_f32(vi4x0), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk20c1, vget_low_f32(vi2x2), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk20c1, vget_low_f32(vi4x2), 0);
+
+        const float32x4_t vk00c2 = vld1q_f32(w + 28);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk00c2, vget_high_f32(vi0x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk00c2, vget_high_f32(vi2x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk00c2, vget_low_f32(vi0x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk00c2, vget_low_f32(vi2x2), 1);
+
+        const float32x4_t vk10c2 = vld1q_f32(w + 32);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk10c2, vget_high_f32(vi1x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk10c2, vget_high_f32(vi3x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk10c2, vget_low_f32(vi1x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk10c2, vget_low_f32(vi3x2), 1);
+
+        const float32x4_t vk20c2 = vld1q_f32(w + 36);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk20c2, vget_high_f32(vi2x0), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk20c2, vget_high_f32(vi4x0), 1);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk20c2, vget_low_f32(vi2x2), 1);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk20c2, vget_low_f32(vi4x2), 1);
+
+        const float32x4_t vk01c0 = vld1q_f32(w + 40);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk01c0, vget_low_f32(vi0x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk01c0, vget_low_f32(vi2x1), 0);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk01c0, vget_high_f32(vi0x2), 0);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk01c0, vget_high_f32(vi2x2), 0);
+        }
+
+        const float32x4_t vk11c0 = vld1q_f32(w + 44);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk11c0, vget_low_f32(vi1x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk11c0, vget_low_f32(vi3x1), 0);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk11c0, vget_high_f32(vi1x2), 0);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk11c0, vget_high_f32(vi3x2), 0);
+        }
+
+        const float32x4_t vk21c0 = vld1q_f32(w + 48);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk21c0, vget_low_f32(vi2x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk21c0, vget_low_f32(vi4x1), 0);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk21c0, vget_high_f32(vi2x2), 0);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk21c0, vget_high_f32(vi4x2), 0);
+        }
+
+        const float32x4_t vk01c1 = vld1q_f32(w + 52);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk01c1, vget_low_f32(vi0x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk01c1, vget_low_f32(vi2x1), 1);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk01c1, vget_high_f32(vi0x2), 1);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk01c1, vget_high_f32(vi2x2), 1);
+        }
+
+        const float32x4_t vk11c1 = vld1q_f32(w + 56);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk11c1, vget_low_f32(vi1x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk11c1, vget_low_f32(vi3x1), 1);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk11c1, vget_high_f32(vi1x2), 1);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk11c1, vget_high_f32(vi3x2), 1);
+        }
+
+        const float32x4_t vk21c1 = vld1q_f32(w + 60);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk21c1, vget_low_f32(vi2x1), 1);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk21c1, vget_low_f32(vi4x1), 1);
+        if (iw > 2) {
+          vo0x1 = vmlaq_lane_f32(vo0x1, vk21c1, vget_high_f32(vi2x2), 1);
+          vo1x1 = vmlaq_lane_f32(vo1x1, vk21c1, vget_high_f32(vi4x2), 1);
+        }
+
+        const float32x4_t vk01c2 = vld1q_f32(w + 64);
+
+        float32x4_t vi0x3 = vmovq_n_f32(0.0f);
+        float32x4_t vi1x3 = vmovq_n_f32(0.0f);
+        float32x4_t vi2x3 = vmovq_n_f32(0.0f);
+        float32x4_t vi3x3 = vmovq_n_f32(0.0f);
+        float32x4_t vi4x3 = vmovq_n_f32(0.0f);
+        if (iw > 2) {
+          // viMx3 = ( 0.0, 0.0, 0.0, iM3c2 )
+          vi0x3 = vld1q_lane_f32(i0 + 8, vi0x3, 0);
+          vi1x3 = vld1q_lane_f32(i1 + 8, vi1x3, 0);
+          vi2x3 = vld1q_lane_f32(i2 + 8, vi2x3, 0);
+          vi3x3 = vld1q_lane_f32(i3 + 8, vi3x3, 0);
+          vi4x3 = vld1q_lane_f32(i4 + 8, vi4x3, 0);
+        }
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk01c2, vget_high_f32(vi0x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk01c2, vget_high_f32(vi2x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk01c2, vget_low_f32(vi0x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk01c2, vget_low_f32(vi2x3), 0);
+
+        const float32x4_t vk11c2 = vld1q_f32(w + 68);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk11c2, vget_high_f32(vi1x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk11c2, vget_high_f32(vi3x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk11c2, vget_low_f32(vi1x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk11c2, vget_low_f32(vi3x3), 0);
+
+        const float32x4_t vk21c2 = vld1q_f32(w + 72);
+
+        vo0x0 = vmlaq_lane_f32(vo0x0, vk21c2, vget_high_f32(vi2x1), 0);
+        vo1x0 = vmlaq_lane_f32(vo1x0, vk21c2, vget_high_f32(vi4x1), 0);
+        vo0x1 = vmlaq_lane_f32(vo0x1, vk21c2, vget_low_f32(vi2x3), 0);
+        vo1x1 = vmlaq_lane_f32(vo1x1, vk21c2, vget_low_f32(vi4x3), 0);
+
+        if (iw >= 2) {
+          const float32x4_t vk02c0 = vld1q_f32(w + 76);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk02c0, vget_high_f32(vi0x1), 1);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk02c0, vget_high_f32(vi2x1), 1);
+
+          const float32x4_t vk12c0 = vld1q_f32(w + 80);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk12c0, vget_high_f32(vi1x1), 1);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk12c0, vget_high_f32(vi3x1), 1);
+
+          const float32x4_t vk22c0 = vld1q_f32(w + 84);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk22c0, vget_high_f32(vi2x1), 1);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk22c0, vget_high_f32(vi4x1), 1);
+
+          const float32x4_t vk02c1 = vld1q_f32(w + 88);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk02c1, vget_low_f32(vi0x2), 0);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk02c1, vget_low_f32(vi2x2), 0);
+
+          const float32x4_t vk12c1 = vld1q_f32(w + 92);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk12c1, vget_low_f32(vi1x2), 0);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk12c1, vget_low_f32(vi3x2), 0);
+
+          const float32x4_t vk22c1 = vld1q_f32(w + 96);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk22c1, vget_low_f32(vi2x2), 0);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk22c1, vget_low_f32(vi4x2), 0);
+
+          const float32x4_t vk02c2 = vld1q_f32(w + 100);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk02c2, vget_low_f32(vi0x2), 1);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk02c2, vget_low_f32(vi2x2), 1);
+
+          const float32x4_t vk12c2 = vld1q_f32(w + 104);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk12c2, vget_low_f32(vi1x2), 1);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk12c2, vget_low_f32(vi3x2), 1);
+
+          const float32x4_t vk22c2 = vld1q_f32(w + 108);
+
+          vo0x0 = vmlaq_lane_f32(vo0x0, vk22c2, vget_low_f32(vi2x2), 1);
+          vo1x0 = vmlaq_lane_f32(vo1x0, vk22c2, vget_low_f32(vi4x2), 1);
+        }
+
+        vo0x0 = vmaxq_f32(vo0x0, vmin);
+        vo1x0 = vmaxq_f32(vo1x0, vmin);
+        vo0x1 = vmaxq_f32(vo0x1, vmin);
+        vo1x1 = vmaxq_f32(vo1x1, vmin);
+
+        vo0x0 = vminq_f32(vo0x0, vmax);
+        vo1x0 = vminq_f32(vo1x0, vmax);
+        vo0x1 = vminq_f32(vo0x1, vmax);
+        vo1x1 = vminq_f32(vo1x1, vmax);
+
+        if (iw == 3) {
+          // Exactly 2 output width elements remaining
+          const float32x4x2_t vo0c0123 = vzipq_f32(vo0x0, vo0x1);
+          const float32x4x2_t vo1c0123 = vzipq_f32(vo1x0, vo1x1);
+
+          vst1_f32(o1c0, vget_low_f32(vo1c0123.val[0])); o1c0 += 2;
+          vst1_f32(o1c1, vget_high_f32(vo1c0123.val[0])); o1c1 += 2;
+          vst1_f32(o1c2, vget_low_f32(vo1c0123.val[1])); o1c2 += 2;
+          vst1_f32(o1c3, vget_high_f32(vo1c0123.val[1])); o1c3 += 2;
+
+          vst1_f32(o0c0, vget_low_f32(vo0c0123.val[0])); o0c0 += 2;
+          vst1_f32(o0c1, vget_high_f32(vo0c0123.val[0])); o0c1 += 2;
+          vst1_f32(o0c2, vget_low_f32(vo0c0123.val[1])); o0c2 += 2;
+          vst1_f32(o0c3, vget_high_f32(vo0c0123.val[1])); o0c3 += 2;
+        } else {
+          // Exactly 1 output width element remaining
+
+          vst1q_lane_f32(o1c0, vo1x0, 0); o1c0 += 1;
+          vst1q_lane_f32(o1c1, vo1x0, 1); o1c1 += 1;
+          vst1q_lane_f32(o1c2, vo1x0, 2); o1c2 += 1;
+          vst1q_lane_f32(o1c3, vo1x0, 3); o1c3 += 1;
+
+          vst1q_lane_f32(o0c0, vo0x0, 0); o0c0 += 1;
+          vst1q_lane_f32(o0c1, vo0x0, 1); o0c1 += 1;
+          vst1q_lane_f32(o0c2, vo0x0, 2); o0c2 += 1;
+          vst1q_lane_f32(o0c3, vo0x0, 3); o0c3 += 1;
+        }
+      }
+      // Move output pointers back to the position of the first pixel in a row,
+      // and forward to the next block of output channels.
+      o0c0 = (float*) ((uintptr_t) o0c0 + output_channel_increment);
+      o0c1 = (float*) ((uintptr_t) o0c1 + output_channel_increment);
+      o0c2 = (float*) ((uintptr_t) o0c2 + output_channel_increment);
+      o0c3 = (float*) ((uintptr_t) o0c3 + output_channel_increment);
+      o1c0 = (float*) ((uintptr_t) o1c0 + output_channel_increment);
+      o1c1 = (float*) ((uintptr_t) o1c1 + output_channel_increment);
+      o1c2 = (float*) ((uintptr_t) o1c2 + output_channel_increment);
+      o1c3 = (float*) ((uintptr_t) o1c3 + output_channel_increment);
+      // Revert input pointers to the position of the first pixel in a row
+      i0 = (const float*) ((uintptr_t) i0 - input_width_increment);
+      i1 = (const float*) ((uintptr_t) i1 - input_width_increment);
+      i2 = (const float*) ((uintptr_t) i2 - input_width_increment);
+      i3 = (const float*) ((uintptr_t) i3 - input_width_increment);
+      i4 = (const float*) ((uintptr_t) i4 - input_width_increment);
+      // Move to the block of weights for the next 4 output channels (4 biases + 3x3x3x4 kernel values = 112 floats)
+      w += 112;
+      c = doz(c, 4);
+    } while (c != 0);
+    // Move output pointers forward to the next two rows
+    output0 = (float*) ((uintptr_t) output1 + output_height_stride);
+    output1 = (float*) ((uintptr_t) output0 + output_height_stride);
+    // Move input pointers forward to the next four rows
+    i0 = i4;
+    i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
+    i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
+    i3 = (const float*) ((uintptr_t) i2 + input_height_stride);
+    i4 = (const float*) ((uintptr_t) i3 + input_height_stride);
+  }
+}
diff --git a/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-arm.c b/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-arm.c
deleted file mode 100644
index 9853293..0000000
--- a/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-arm.c
+++ /dev/null
@@ -1,452 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    const float* i4 = input[4];
-    assert(i4 != NULL);
-    if XNN_UNPREDICTABLE(i4 != zero) {
-      i4 = (const float*) ((uintptr_t) i4 + input_offset);
-    }
-    const float* i5 = input[5];
-    assert(i5 != NULL);
-    if XNN_UNPREDICTABLE(i5 != zero) {
-      i5 = (const float*) ((uintptr_t) i5 + input_offset);
-    }
-    const float* i6 = input[6];
-    assert(i6 != NULL);
-    if XNN_UNPREDICTABLE(i6 != zero) {
-      i6 = (const float*) ((uintptr_t) i6 + input_offset);
-    }
-    const float* i7 = input[7];
-    assert(i7 != NULL);
-    if XNN_UNPREDICTABLE(i7 != zero) {
-      i7 = (const float*) ((uintptr_t) i7 + input_offset);
-    }
-    const float* i8 = input[8];
-    assert(i8 != NULL);
-    if XNN_UNPREDICTABLE(i8 != zero) {
-      i8 = (const float*) ((uintptr_t) i8 + input_offset);
-    }
-    const float* i9 = input[9];
-    assert(i9 != NULL);
-    if XNN_UNPREDICTABLE(i9 != zero) {
-      i9 = (const float*) ((uintptr_t) i9 + input_offset);
-    }
-    const float* i10 = input[10];
-    assert(i10 != NULL);
-    if XNN_UNPREDICTABLE(i10 != zero) {
-      i10 = (const float*) ((uintptr_t) i10 + input_offset);
-    }
-    const float* i11 = input[11];
-    assert(i11 != NULL);
-    if XNN_UNPREDICTABLE(i11 != zero) {
-      i11 = (const float*) ((uintptr_t) i11 + input_offset);
-    }
-    const float* i12 = input[12];
-    assert(i12 != NULL);
-    if XNN_UNPREDICTABLE(i12 != zero) {
-      i12 = (const float*) ((uintptr_t) i12 + input_offset);
-    }
-    const float* i13 = input[13];
-    assert(i13 != NULL);
-    if XNN_UNPREDICTABLE(i13 != zero) {
-      i13 = (const float*) ((uintptr_t) i13 + input_offset);
-    }
-    const float* i14 = input[14];
-    assert(i14 != NULL);
-    if XNN_UNPREDICTABLE(i14 != zero) {
-      i14 = (const float*) ((uintptr_t) i14 + input_offset);
-    }
-    const float* i15 = input[15];
-    assert(i15 != NULL);
-    if XNN_UNPREDICTABLE(i15 != zero) {
-      i15 = (const float*) ((uintptr_t) i15 + input_offset);
-    }
-    const float* i16 = input[16];
-    assert(i16 != NULL);
-    if XNN_UNPREDICTABLE(i16 != zero) {
-      i16 = (const float*) ((uintptr_t) i16 + input_offset);
-    }
-    const float* i17 = input[17];
-    assert(i17 != NULL);
-    if XNN_UNPREDICTABLE(i17 != zero) {
-      i17 = (const float*) ((uintptr_t) i17 + input_offset);
-    }
-    const float* i18 = input[18];
-    assert(i18 != NULL);
-    if XNN_UNPREDICTABLE(i18 != zero) {
-      i18 = (const float*) ((uintptr_t) i18 + input_offset);
-    }
-    const float* i19 = input[19];
-    assert(i19 != NULL);
-    if XNN_UNPREDICTABLE(i19 != zero) {
-      i19 = (const float*) ((uintptr_t) i19 + input_offset);
-    }
-    const float* i20 = input[20];
-    assert(i20 != NULL);
-    if XNN_UNPREDICTABLE(i20 != zero) {
-      i20 = (const float*) ((uintptr_t) i20 + input_offset);
-    }
-    const float* i21 = input[21];
-    assert(i21 != NULL);
-    if XNN_UNPREDICTABLE(i21 != zero) {
-      i21 = (const float*) ((uintptr_t) i21 + input_offset);
-    }
-    const float* i22 = input[22];
-    assert(i22 != NULL);
-    if XNN_UNPREDICTABLE(i22 != zero) {
-      i22 = (const float*) ((uintptr_t) i22 + input_offset);
-    }
-    const float* i23 = input[23];
-    assert(i23 != NULL);
-    if XNN_UNPREDICTABLE(i23 != zero) {
-      i23 = (const float*) ((uintptr_t) i23 + input_offset);
-    }
-    const float* i24 = input[24];
-    assert(i24 != NULL);
-    if XNN_UNPREDICTABLE(i24 != zero) {
-      i24 = (const float*) ((uintptr_t) i24 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      i4 += 4;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 20);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      i5 += 4;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 24);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      i6 += 4;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      i7 += 4;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      i8 += 4;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 36);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      i9 += 4;
-
-      const v128_t vk9x0123 = wasm_v128_load(w + 40);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      i10 += 4;
-
-      const v128_t vk10x0123 = wasm_v128_load(w + 44);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      i11 += 4;
-
-      const v128_t vk11x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      i12 += 4;
-
-      const v128_t vk12x0123 = wasm_v128_load(w + 52);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      i13 += 4;
-
-      const v128_t vk13x0123 = wasm_v128_load(w + 56);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      i14 += 4;
-
-      const v128_t vk14x0123 = wasm_v128_load(w + 60);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      i15 += 4;
-
-      const v128_t vk15x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      i16 += 4;
-
-      const v128_t vk16x0123 = wasm_v128_load(w + 68);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      i17 += 4;
-
-      const v128_t vk17x0123 = wasm_v128_load(w + 72);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      i18 += 4;
-
-      const v128_t vk18x0123 = wasm_v128_load(w + 76);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      i19 += 4;
-
-      const v128_t vk19x0123 = wasm_v128_load(w + 80);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      i20 += 4;
-
-      const v128_t vk20x0123 = wasm_v128_load(w + 84);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      i21 += 4;
-
-      const v128_t vk21x0123 = wasm_v128_load(w + 88);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      i22 += 4;
-
-      const v128_t vk22x0123 = wasm_v128_load(w + 92);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      i23 += 4;
-
-      const v128_t vk23x0123 = wasm_v128_load(w + 96);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      i24 += 4;
-
-      const v128_t vk24x0123 = wasm_v128_load(w + 100);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-
-      w += 104;
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vk4x0123 = wasm_v128_load(w + 20);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vk5x0123 = wasm_v128_load(w + 24);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vk6x0123 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vk7x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vk8x0123 = wasm_v128_load(w + 36);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      const v128_t vk9x0123 = wasm_v128_load(w + 40);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      const v128_t vk10x0123 = wasm_v128_load(w + 44);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      const v128_t vk11x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      const v128_t vk12x0123 = wasm_v128_load(w + 52);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      const v128_t vk13x0123 = wasm_v128_load(w + 56);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      const v128_t vk14x0123 = wasm_v128_load(w + 60);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      const v128_t vk15x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      const v128_t vk16x0123 = wasm_v128_load(w + 68);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      const v128_t vk17x0123 = wasm_v128_load(w + 72);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      const v128_t vk18x0123 = wasm_v128_load(w + 76);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      const v128_t vk19x0123 = wasm_v128_load(w + 80);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      const v128_t vk20x0123 = wasm_v128_load(w + 84);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      const v128_t vk21x0123 = wasm_v128_load(w + 88);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      const v128_t vk22x0123 = wasm_v128_load(w + 92);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      const v128_t vk23x0123 = wasm_v128_load(w + 96);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      const v128_t vk24x0123 = wasm_v128_load(w + 100);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
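
A note on the `c & 2` / `c & 1` tail that closes every dwconv kernel in this diff: the remaining 1-3 channels are stored straight out of the SIMD register, first as one 8-byte store (the low two f32 lanes reinterpreted through an f64 lane), then, after shuffling the upper lanes down, as a single scalar float. A minimal, self-contained sketch of that idiom follows; the helper name store_f32_tail is illustrative and is not an XNNPACK symbol.

#include <wasm_simd128.h>

// Store the low `c` (1..3) float lanes of `v` to `output` without writing past the end.
static inline float* store_f32_tail(float* output, v128_t v, size_t c) {
  if (c & 2) {
    // Low two f32 lanes reinterpreted as one f64 lane: a single 8-byte store.
    *((double*) output) = wasm_f64x2_extract_lane(v, 0);
    // Shift lanes 2,3 down to 0,1 so a possible final scalar comes from lane 0.
    v = wasm_v32x4_shuffle(v, v, 2, 3, 2, 3);
    output += 2;
  }
  if (c & 1) {
    *output = wasm_f32x4_extract_lane(v, 0);
    output += 1;
  }
  return output;
}
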
diff --git a/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-x86.c b/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-x86.c
deleted file mode 100644
index 4f3a5e7..0000000
--- a/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-acc2-x86.c
+++ /dev/null
@@ -1,452 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    const float* i4 = input[4];
-    assert(i4 != NULL);
-    if XNN_UNPREDICTABLE(i4 != zero) {
-      i4 = (const float*) ((uintptr_t) i4 + input_offset);
-    }
-    const float* i5 = input[5];
-    assert(i5 != NULL);
-    if XNN_UNPREDICTABLE(i5 != zero) {
-      i5 = (const float*) ((uintptr_t) i5 + input_offset);
-    }
-    const float* i6 = input[6];
-    assert(i6 != NULL);
-    if XNN_UNPREDICTABLE(i6 != zero) {
-      i6 = (const float*) ((uintptr_t) i6 + input_offset);
-    }
-    const float* i7 = input[7];
-    assert(i7 != NULL);
-    if XNN_UNPREDICTABLE(i7 != zero) {
-      i7 = (const float*) ((uintptr_t) i7 + input_offset);
-    }
-    const float* i8 = input[8];
-    assert(i8 != NULL);
-    if XNN_UNPREDICTABLE(i8 != zero) {
-      i8 = (const float*) ((uintptr_t) i8 + input_offset);
-    }
-    const float* i9 = input[9];
-    assert(i9 != NULL);
-    if XNN_UNPREDICTABLE(i9 != zero) {
-      i9 = (const float*) ((uintptr_t) i9 + input_offset);
-    }
-    const float* i10 = input[10];
-    assert(i10 != NULL);
-    if XNN_UNPREDICTABLE(i10 != zero) {
-      i10 = (const float*) ((uintptr_t) i10 + input_offset);
-    }
-    const float* i11 = input[11];
-    assert(i11 != NULL);
-    if XNN_UNPREDICTABLE(i11 != zero) {
-      i11 = (const float*) ((uintptr_t) i11 + input_offset);
-    }
-    const float* i12 = input[12];
-    assert(i12 != NULL);
-    if XNN_UNPREDICTABLE(i12 != zero) {
-      i12 = (const float*) ((uintptr_t) i12 + input_offset);
-    }
-    const float* i13 = input[13];
-    assert(i13 != NULL);
-    if XNN_UNPREDICTABLE(i13 != zero) {
-      i13 = (const float*) ((uintptr_t) i13 + input_offset);
-    }
-    const float* i14 = input[14];
-    assert(i14 != NULL);
-    if XNN_UNPREDICTABLE(i14 != zero) {
-      i14 = (const float*) ((uintptr_t) i14 + input_offset);
-    }
-    const float* i15 = input[15];
-    assert(i15 != NULL);
-    if XNN_UNPREDICTABLE(i15 != zero) {
-      i15 = (const float*) ((uintptr_t) i15 + input_offset);
-    }
-    const float* i16 = input[16];
-    assert(i16 != NULL);
-    if XNN_UNPREDICTABLE(i16 != zero) {
-      i16 = (const float*) ((uintptr_t) i16 + input_offset);
-    }
-    const float* i17 = input[17];
-    assert(i17 != NULL);
-    if XNN_UNPREDICTABLE(i17 != zero) {
-      i17 = (const float*) ((uintptr_t) i17 + input_offset);
-    }
-    const float* i18 = input[18];
-    assert(i18 != NULL);
-    if XNN_UNPREDICTABLE(i18 != zero) {
-      i18 = (const float*) ((uintptr_t) i18 + input_offset);
-    }
-    const float* i19 = input[19];
-    assert(i19 != NULL);
-    if XNN_UNPREDICTABLE(i19 != zero) {
-      i19 = (const float*) ((uintptr_t) i19 + input_offset);
-    }
-    const float* i20 = input[20];
-    assert(i20 != NULL);
-    if XNN_UNPREDICTABLE(i20 != zero) {
-      i20 = (const float*) ((uintptr_t) i20 + input_offset);
-    }
-    const float* i21 = input[21];
-    assert(i21 != NULL);
-    if XNN_UNPREDICTABLE(i21 != zero) {
-      i21 = (const float*) ((uintptr_t) i21 + input_offset);
-    }
-    const float* i22 = input[22];
-    assert(i22 != NULL);
-    if XNN_UNPREDICTABLE(i22 != zero) {
-      i22 = (const float*) ((uintptr_t) i22 + input_offset);
-    }
-    const float* i23 = input[23];
-    assert(i23 != NULL);
-    if XNN_UNPREDICTABLE(i23 != zero) {
-      i23 = (const float*) ((uintptr_t) i23 + input_offset);
-    }
-    const float* i24 = input[24];
-    assert(i24 != NULL);
-    if XNN_UNPREDICTABLE(i24 != zero) {
-      i24 = (const float*) ((uintptr_t) i24 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      i4 += 4;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 20);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      i5 += 4;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 24);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      i6 += 4;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      i7 += 4;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      i8 += 4;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 36);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      i9 += 4;
-
-      const v128_t vk9x0123 = wasm_v128_load(w + 40);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      i10 += 4;
-
-      const v128_t vk10x0123 = wasm_v128_load(w + 44);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      i11 += 4;
-
-      const v128_t vk11x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      i12 += 4;
-
-      const v128_t vk12x0123 = wasm_v128_load(w + 52);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      i13 += 4;
-
-      const v128_t vk13x0123 = wasm_v128_load(w + 56);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      i14 += 4;
-
-      const v128_t vk14x0123 = wasm_v128_load(w + 60);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      i15 += 4;
-
-      const v128_t vk15x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      i16 += 4;
-
-      const v128_t vk16x0123 = wasm_v128_load(w + 68);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      i17 += 4;
-
-      const v128_t vk17x0123 = wasm_v128_load(w + 72);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      i18 += 4;
-
-      const v128_t vk18x0123 = wasm_v128_load(w + 76);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      i19 += 4;
-
-      const v128_t vk19x0123 = wasm_v128_load(w + 80);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      i20 += 4;
-
-      const v128_t vk20x0123 = wasm_v128_load(w + 84);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      i21 += 4;
-
-      const v128_t vk21x0123 = wasm_v128_load(w + 88);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      i22 += 4;
-
-      const v128_t vk22x0123 = wasm_v128_load(w + 92);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      i23 += 4;
-
-      const v128_t vk23x0123 = wasm_v128_load(w + 96);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      i24 += 4;
-
-      const v128_t vk24x0123 = wasm_v128_load(w + 100);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-
-      w += 104;
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vk4x0123 = wasm_v128_load(w + 20);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vk5x0123 = wasm_v128_load(w + 24);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vk6x0123 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vk7x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vk8x0123 = wasm_v128_load(w + 36);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      const v128_t vk9x0123 = wasm_v128_load(w + 40);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      const v128_t vk10x0123 = wasm_v128_load(w + 44);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      const v128_t vk11x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      const v128_t vk12x0123 = wasm_v128_load(w + 52);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      const v128_t vk13x0123 = wasm_v128_load(w + 56);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      const v128_t vk14x0123 = wasm_v128_load(w + 60);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      const v128_t vk15x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      const v128_t vk16x0123 = wasm_v128_load(w + 68);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      const v128_t vk17x0123 = wasm_v128_load(w + 72);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      const v128_t vk18x0123 = wasm_v128_load(w + 76);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      const v128_t vk19x0123 = wasm_v128_load(w + 80);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      const v128_t vk20x0123 = wasm_v128_load(w + 84);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      const v128_t vk21x0123 = wasm_v128_load(w + 88);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      const v128_t vk22x0123 = wasm_v128_load(w + 92);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      const v128_t vk23x0123 = wasm_v128_load(w + 96);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      const v128_t vk24x0123 = wasm_v128_load(w + 100);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-arm-acc2.c b/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-arm-acc2.c
new file mode 100644
index 0000000..f977c9c
--- /dev/null
+++ b/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-arm-acc2.c
@@ -0,0 +1,452 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    const float* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const float*) ((uintptr_t) i4 + input_offset);
+    }
+    const float* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const float*) ((uintptr_t) i5 + input_offset);
+    }
+    const float* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const float*) ((uintptr_t) i6 + input_offset);
+    }
+    const float* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const float*) ((uintptr_t) i7 + input_offset);
+    }
+    const float* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const float*) ((uintptr_t) i8 + input_offset);
+    }
+    const float* i9 = input[9];
+    assert(i9 != NULL);
+    if XNN_UNPREDICTABLE(i9 != zero) {
+      i9 = (const float*) ((uintptr_t) i9 + input_offset);
+    }
+    const float* i10 = input[10];
+    assert(i10 != NULL);
+    if XNN_UNPREDICTABLE(i10 != zero) {
+      i10 = (const float*) ((uintptr_t) i10 + input_offset);
+    }
+    const float* i11 = input[11];
+    assert(i11 != NULL);
+    if XNN_UNPREDICTABLE(i11 != zero) {
+      i11 = (const float*) ((uintptr_t) i11 + input_offset);
+    }
+    const float* i12 = input[12];
+    assert(i12 != NULL);
+    if XNN_UNPREDICTABLE(i12 != zero) {
+      i12 = (const float*) ((uintptr_t) i12 + input_offset);
+    }
+    const float* i13 = input[13];
+    assert(i13 != NULL);
+    if XNN_UNPREDICTABLE(i13 != zero) {
+      i13 = (const float*) ((uintptr_t) i13 + input_offset);
+    }
+    const float* i14 = input[14];
+    assert(i14 != NULL);
+    if XNN_UNPREDICTABLE(i14 != zero) {
+      i14 = (const float*) ((uintptr_t) i14 + input_offset);
+    }
+    const float* i15 = input[15];
+    assert(i15 != NULL);
+    if XNN_UNPREDICTABLE(i15 != zero) {
+      i15 = (const float*) ((uintptr_t) i15 + input_offset);
+    }
+    const float* i16 = input[16];
+    assert(i16 != NULL);
+    if XNN_UNPREDICTABLE(i16 != zero) {
+      i16 = (const float*) ((uintptr_t) i16 + input_offset);
+    }
+    const float* i17 = input[17];
+    assert(i17 != NULL);
+    if XNN_UNPREDICTABLE(i17 != zero) {
+      i17 = (const float*) ((uintptr_t) i17 + input_offset);
+    }
+    const float* i18 = input[18];
+    assert(i18 != NULL);
+    if XNN_UNPREDICTABLE(i18 != zero) {
+      i18 = (const float*) ((uintptr_t) i18 + input_offset);
+    }
+    const float* i19 = input[19];
+    assert(i19 != NULL);
+    if XNN_UNPREDICTABLE(i19 != zero) {
+      i19 = (const float*) ((uintptr_t) i19 + input_offset);
+    }
+    const float* i20 = input[20];
+    assert(i20 != NULL);
+    if XNN_UNPREDICTABLE(i20 != zero) {
+      i20 = (const float*) ((uintptr_t) i20 + input_offset);
+    }
+    const float* i21 = input[21];
+    assert(i21 != NULL);
+    if XNN_UNPREDICTABLE(i21 != zero) {
+      i21 = (const float*) ((uintptr_t) i21 + input_offset);
+    }
+    const float* i22 = input[22];
+    assert(i22 != NULL);
+    if XNN_UNPREDICTABLE(i22 != zero) {
+      i22 = (const float*) ((uintptr_t) i22 + input_offset);
+    }
+    const float* i23 = input[23];
+    assert(i23 != NULL);
+    if XNN_UNPREDICTABLE(i23 != zero) {
+      i23 = (const float*) ((uintptr_t) i23 + input_offset);
+    }
+    const float* i24 = input[24];
+    assert(i24 != NULL);
+    if XNN_UNPREDICTABLE(i24 != zero) {
+      i24 = (const float*) ((uintptr_t) i24 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      i4 += 4;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 20);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      i5 += 4;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 24);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      i6 += 4;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      i7 += 4;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      i8 += 4;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 36);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      i9 += 4;
+
+      const v128_t vk9x0123 = wasm_v128_load(w + 40);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      i10 += 4;
+
+      const v128_t vk10x0123 = wasm_v128_load(w + 44);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      i11 += 4;
+
+      const v128_t vk11x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      i12 += 4;
+
+      const v128_t vk12x0123 = wasm_v128_load(w + 52);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      i13 += 4;
+
+      const v128_t vk13x0123 = wasm_v128_load(w + 56);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      i14 += 4;
+
+      const v128_t vk14x0123 = wasm_v128_load(w + 60);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      i15 += 4;
+
+      const v128_t vk15x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      i16 += 4;
+
+      const v128_t vk16x0123 = wasm_v128_load(w + 68);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      i17 += 4;
+
+      const v128_t vk17x0123 = wasm_v128_load(w + 72);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      i18 += 4;
+
+      const v128_t vk18x0123 = wasm_v128_load(w + 76);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      i19 += 4;
+
+      const v128_t vk19x0123 = wasm_v128_load(w + 80);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      i20 += 4;
+
+      const v128_t vk20x0123 = wasm_v128_load(w + 84);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      i21 += 4;
+
+      const v128_t vk21x0123 = wasm_v128_load(w + 88);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      i22 += 4;
+
+      const v128_t vk22x0123 = wasm_v128_load(w + 92);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      i23 += 4;
+
+      const v128_t vk23x0123 = wasm_v128_load(w + 96);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      i24 += 4;
+
+      const v128_t vk24x0123 = wasm_v128_load(w + 100);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+
+      w += 104;
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vk4x0123 = wasm_v128_load(w + 20);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vk5x0123 = wasm_v128_load(w + 24);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vk6x0123 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vk7x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vk8x0123 = wasm_v128_load(w + 36);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      const v128_t vk9x0123 = wasm_v128_load(w + 40);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      const v128_t vk10x0123 = wasm_v128_load(w + 44);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      const v128_t vk11x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      const v128_t vk12x0123 = wasm_v128_load(w + 52);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      const v128_t vk13x0123 = wasm_v128_load(w + 56);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      const v128_t vk14x0123 = wasm_v128_load(w + 60);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      const v128_t vk15x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      const v128_t vk16x0123 = wasm_v128_load(w + 68);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      const v128_t vk17x0123 = wasm_v128_load(w + 72);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      const v128_t vk18x0123 = wasm_v128_load(w + 76);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      const v128_t vk19x0123 = wasm_v128_load(w + 80);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      const v128_t vk20x0123 = wasm_v128_load(w + 84);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      const v128_t vk21x0123 = wasm_v128_load(w + 88);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      const v128_t vk22x0123 = wasm_v128_load(w + 92);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      const v128_t vk23x0123 = wasm_v128_load(w + 96);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      const v128_t vk24x0123 = wasm_v128_load(w + 100);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
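
The "acc2" in the kernel name added above refers to its two-accumulator schedule: the 25 taps alternate between vacc0123p0 and vacc0123p1 so consecutive adds do not form one long dependency chain, and the two partial sums are combined once before clamping. Below is a hedged, self-contained sketch of that scheme with illustrative names (not XNNPACK APIs); the generated code seeds the second accumulator with the first odd tap's product rather than with zero, but the result is the same.

#include <wasm_simd128.h>

// Two-accumulator dot product over `num_taps` taps of 4 channels each.
static inline v128_t dwconv_acc2(const float* bias,
                                 const float* const* tap_inputs,
                                 const float* tap_weights,
                                 int num_taps) {
  v128_t acc0 = wasm_v128_load(bias);    // accumulator 0 starts from the bias
  v128_t acc1 = wasm_f32x4_splat(0.0f);  // accumulator 1 collects the odd taps
  for (int t = 0; t < num_taps; t++) {
    const v128_t vi = wasm_v128_load(tap_inputs[t]);
    const v128_t vk = wasm_v128_load(tap_weights + 4 * t);
    const v128_t prod = wasm_f32x4_mul(vi, vk);
    if (t & 1) {
      acc1 = wasm_f32x4_add(acc1, prod);
    } else {
      acc0 = wasm_f32x4_add(acc0, prod);
    }
  }
  return wasm_f32x4_add(acc0, acc1);     // "Add up all accumulators"
}
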
diff --git a/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-x86-acc2.c b/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-x86-acc2.c
new file mode 100644
index 0000000..24b2361
--- /dev/null
+++ b/src/f32-dwconv/gen/up4x25-minmax-wasmsimd-x86-acc2.c
@@ -0,0 +1,452 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    const float* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const float*) ((uintptr_t) i4 + input_offset);
+    }
+    const float* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const float*) ((uintptr_t) i5 + input_offset);
+    }
+    const float* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const float*) ((uintptr_t) i6 + input_offset);
+    }
+    const float* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const float*) ((uintptr_t) i7 + input_offset);
+    }
+    const float* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const float*) ((uintptr_t) i8 + input_offset);
+    }
+    const float* i9 = input[9];
+    assert(i9 != NULL);
+    if XNN_UNPREDICTABLE(i9 != zero) {
+      i9 = (const float*) ((uintptr_t) i9 + input_offset);
+    }
+    const float* i10 = input[10];
+    assert(i10 != NULL);
+    if XNN_UNPREDICTABLE(i10 != zero) {
+      i10 = (const float*) ((uintptr_t) i10 + input_offset);
+    }
+    const float* i11 = input[11];
+    assert(i11 != NULL);
+    if XNN_UNPREDICTABLE(i11 != zero) {
+      i11 = (const float*) ((uintptr_t) i11 + input_offset);
+    }
+    const float* i12 = input[12];
+    assert(i12 != NULL);
+    if XNN_UNPREDICTABLE(i12 != zero) {
+      i12 = (const float*) ((uintptr_t) i12 + input_offset);
+    }
+    const float* i13 = input[13];
+    assert(i13 != NULL);
+    if XNN_UNPREDICTABLE(i13 != zero) {
+      i13 = (const float*) ((uintptr_t) i13 + input_offset);
+    }
+    const float* i14 = input[14];
+    assert(i14 != NULL);
+    if XNN_UNPREDICTABLE(i14 != zero) {
+      i14 = (const float*) ((uintptr_t) i14 + input_offset);
+    }
+    const float* i15 = input[15];
+    assert(i15 != NULL);
+    if XNN_UNPREDICTABLE(i15 != zero) {
+      i15 = (const float*) ((uintptr_t) i15 + input_offset);
+    }
+    const float* i16 = input[16];
+    assert(i16 != NULL);
+    if XNN_UNPREDICTABLE(i16 != zero) {
+      i16 = (const float*) ((uintptr_t) i16 + input_offset);
+    }
+    const float* i17 = input[17];
+    assert(i17 != NULL);
+    if XNN_UNPREDICTABLE(i17 != zero) {
+      i17 = (const float*) ((uintptr_t) i17 + input_offset);
+    }
+    const float* i18 = input[18];
+    assert(i18 != NULL);
+    if XNN_UNPREDICTABLE(i18 != zero) {
+      i18 = (const float*) ((uintptr_t) i18 + input_offset);
+    }
+    const float* i19 = input[19];
+    assert(i19 != NULL);
+    if XNN_UNPREDICTABLE(i19 != zero) {
+      i19 = (const float*) ((uintptr_t) i19 + input_offset);
+    }
+    const float* i20 = input[20];
+    assert(i20 != NULL);
+    if XNN_UNPREDICTABLE(i20 != zero) {
+      i20 = (const float*) ((uintptr_t) i20 + input_offset);
+    }
+    const float* i21 = input[21];
+    assert(i21 != NULL);
+    if XNN_UNPREDICTABLE(i21 != zero) {
+      i21 = (const float*) ((uintptr_t) i21 + input_offset);
+    }
+    const float* i22 = input[22];
+    assert(i22 != NULL);
+    if XNN_UNPREDICTABLE(i22 != zero) {
+      i22 = (const float*) ((uintptr_t) i22 + input_offset);
+    }
+    const float* i23 = input[23];
+    assert(i23 != NULL);
+    if XNN_UNPREDICTABLE(i23 != zero) {
+      i23 = (const float*) ((uintptr_t) i23 + input_offset);
+    }
+    const float* i24 = input[24];
+    assert(i24 != NULL);
+    if XNN_UNPREDICTABLE(i24 != zero) {
+      i24 = (const float*) ((uintptr_t) i24 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      i4 += 4;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 20);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      i5 += 4;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 24);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      i6 += 4;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      i7 += 4;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      i8 += 4;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 36);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      i9 += 4;
+
+      const v128_t vk9x0123 = wasm_v128_load(w + 40);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      i10 += 4;
+
+      const v128_t vk10x0123 = wasm_v128_load(w + 44);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      i11 += 4;
+
+      const v128_t vk11x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      i12 += 4;
+
+      const v128_t vk12x0123 = wasm_v128_load(w + 52);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      i13 += 4;
+
+      const v128_t vk13x0123 = wasm_v128_load(w + 56);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      i14 += 4;
+
+      const v128_t vk14x0123 = wasm_v128_load(w + 60);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      i15 += 4;
+
+      const v128_t vk15x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      i16 += 4;
+
+      const v128_t vk16x0123 = wasm_v128_load(w + 68);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      i17 += 4;
+
+      const v128_t vk17x0123 = wasm_v128_load(w + 72);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      i18 += 4;
+
+      const v128_t vk18x0123 = wasm_v128_load(w + 76);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      i19 += 4;
+
+      const v128_t vk19x0123 = wasm_v128_load(w + 80);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      i20 += 4;
+
+      const v128_t vk20x0123 = wasm_v128_load(w + 84);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      i21 += 4;
+
+      const v128_t vk21x0123 = wasm_v128_load(w + 88);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      i22 += 4;
+
+      const v128_t vk22x0123 = wasm_v128_load(w + 92);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      i23 += 4;
+
+      const v128_t vk23x0123 = wasm_v128_load(w + 96);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      i24 += 4;
+
+      const v128_t vk24x0123 = wasm_v128_load(w + 100);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+
+      w += 104;
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vk4x0123 = wasm_v128_load(w + 20);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vk5x0123 = wasm_v128_load(w + 24);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vk6x0123 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vk7x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vk8x0123 = wasm_v128_load(w + 36);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      const v128_t vk9x0123 = wasm_v128_load(w + 40);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      const v128_t vk10x0123 = wasm_v128_load(w + 44);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      const v128_t vk11x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      const v128_t vk12x0123 = wasm_v128_load(w + 52);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      const v128_t vk13x0123 = wasm_v128_load(w + 56);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      const v128_t vk14x0123 = wasm_v128_load(w + 60);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      const v128_t vk15x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      const v128_t vk16x0123 = wasm_v128_load(w + 68);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      const v128_t vk17x0123 = wasm_v128_load(w + 72);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      const v128_t vk18x0123 = wasm_v128_load(w + 76);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      const v128_t vk19x0123 = wasm_v128_load(w + 80);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      const v128_t vk20x0123 = wasm_v128_load(w + 84);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      const v128_t vk21x0123 = wasm_v128_load(w + 88);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      const v128_t vk22x0123 = wasm_v128_load(w + 92);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      const v128_t vk23x0123 = wasm_v128_load(w + 96);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      const v128_t vk24x0123 = wasm_v128_load(w + 100);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
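
Side by side, the two up4x25 files added above differ only in how the [min, max] clamp is expressed: the arm variant uses wasm_f32x4_max / wasm_f32x4_min, which lower well to ARM's vector min/max instructions, while the x86 variant uses a compare plus wasm_v128_bitselect, presumably because that maps onto SSE blend instructions more cheaply than the NaN-propagating f32x4.min/max lowering. A minimal sketch of the two equivalent clamp idioms follows; the helper names are illustrative only.

#include <wasm_simd128.h>

// ARM-flavoured clamp: direct vector min/max, as in the *-wasmsimd-arm-* kernels.
static inline v128_t clamp_arm_style(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_f32x4_max(v, vmin);
  v = wasm_f32x4_min(v, vmax);
  return v;
}

// x86-flavoured clamp: compare, then select the bound where the compare is true,
// as in the *-wasmsimd-x86-* kernels.
static inline v128_t clamp_x86_style(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_v128_bitselect(vmin, v, wasm_f32x4_lt(v, vmin));  // v < vmin ? vmin : v
  v = wasm_v128_bitselect(v, vmax, wasm_f32x4_le(v, vmax));  // v <= vmax ? v : vmax
  return v;
}

For finite inputs within the usual minmax parameters, both forms produce the same clamped result.
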
diff --git a/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-arm.c b/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-arm.c
deleted file mode 100644
index 050b089..0000000
--- a/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-arm.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      w += 20;
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-x86.c b/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-x86.c
deleted file mode 100644
index 6d1e08e..0000000
--- a/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-acc2-x86.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      w += 20;
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-arm-acc2.c b/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-arm-acc2.c
new file mode 100644
index 0000000..7dfbdb1
--- /dev/null
+++ b/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-arm-acc2.c
@@ -0,0 +1,137 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      w += 20;
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-x86-acc2.c b/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-x86-acc2.c
new file mode 100644
index 0000000..7d805dd
--- /dev/null
+++ b/src/f32-dwconv/gen/up4x4-minmax-wasmsimd-x86-acc2.c
@@ -0,0 +1,137 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      w += 20;
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-arm.c b/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-arm.c
deleted file mode 100644
index 04de92a..0000000
--- a/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-arm.c
+++ /dev/null
@@ -1,212 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    const float* i4 = input[4];
-    assert(i4 != NULL);
-    if XNN_UNPREDICTABLE(i4 != zero) {
-      i4 = (const float*) ((uintptr_t) i4 + input_offset);
-    }
-    const float* i5 = input[5];
-    assert(i5 != NULL);
-    if XNN_UNPREDICTABLE(i5 != zero) {
-      i5 = (const float*) ((uintptr_t) i5 + input_offset);
-    }
-    const float* i6 = input[6];
-    assert(i6 != NULL);
-    if XNN_UNPREDICTABLE(i6 != zero) {
-      i6 = (const float*) ((uintptr_t) i6 + input_offset);
-    }
-    const float* i7 = input[7];
-    assert(i7 != NULL);
-    if XNN_UNPREDICTABLE(i7 != zero) {
-      i7 = (const float*) ((uintptr_t) i7 + input_offset);
-    }
-    const float* i8 = input[8];
-    assert(i8 != NULL);
-    if XNN_UNPREDICTABLE(i8 != zero) {
-      i8 = (const float*) ((uintptr_t) i8 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      i4 += 4;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 20);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      i5 += 4;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 24);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      i6 += 4;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      i7 += 4;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      i8 += 4;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 36);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      w += 40;
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vk4x0123 = wasm_v128_load(w + 20);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vk5x0123 = wasm_v128_load(w + 24);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vk6x0123 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vk7x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vk8x0123 = wasm_v128_load(w + 36);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-x86.c b/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-x86.c
deleted file mode 100644
index 7ee8418..0000000
--- a/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-acc2-x86.c
+++ /dev/null
@@ -1,212 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    const float* i4 = input[4];
-    assert(i4 != NULL);
-    if XNN_UNPREDICTABLE(i4 != zero) {
-      i4 = (const float*) ((uintptr_t) i4 + input_offset);
-    }
-    const float* i5 = input[5];
-    assert(i5 != NULL);
-    if XNN_UNPREDICTABLE(i5 != zero) {
-      i5 = (const float*) ((uintptr_t) i5 + input_offset);
-    }
-    const float* i6 = input[6];
-    assert(i6 != NULL);
-    if XNN_UNPREDICTABLE(i6 != zero) {
-      i6 = (const float*) ((uintptr_t) i6 + input_offset);
-    }
-    const float* i7 = input[7];
-    assert(i7 != NULL);
-    if XNN_UNPREDICTABLE(i7 != zero) {
-      i7 = (const float*) ((uintptr_t) i7 + input_offset);
-    }
-    const float* i8 = input[8];
-    assert(i8 != NULL);
-    if XNN_UNPREDICTABLE(i8 != zero) {
-      i8 = (const float*) ((uintptr_t) i8 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      i4 += 4;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 20);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      i5 += 4;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 24);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      i6 += 4;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      i7 += 4;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      i8 += 4;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 36);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      w += 40;
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 4);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 8);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 16);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vk4x0123 = wasm_v128_load(w + 20);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vk5x0123 = wasm_v128_load(w + 24);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vk6x0123 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vk7x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vk8x0123 = wasm_v128_load(w + 36);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      // Add up all accumulators to vacc0123p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-arm-acc2.c b/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-arm-acc2.c
new file mode 100644
index 0000000..3d86273
--- /dev/null
+++ b/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-arm-acc2.c
@@ -0,0 +1,212 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    const float* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const float*) ((uintptr_t) i4 + input_offset);
+    }
+    const float* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const float*) ((uintptr_t) i5 + input_offset);
+    }
+    const float* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const float*) ((uintptr_t) i6 + input_offset);
+    }
+    const float* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const float*) ((uintptr_t) i7 + input_offset);
+    }
+    const float* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const float*) ((uintptr_t) i8 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      i4 += 4;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 20);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      i5 += 4;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 24);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      i6 += 4;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      i7 += 4;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      i8 += 4;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 36);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      w += 40;
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vk4x0123 = wasm_v128_load(w + 20);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vk5x0123 = wasm_v128_load(w + 24);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vk6x0123 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vk7x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vk8x0123 = wasm_v128_load(w + 36);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-x86-acc2.c b/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-x86-acc2.c
new file mode 100644
index 0000000..05f1b12
--- /dev/null
+++ b/src/f32-dwconv/gen/up4x9-minmax-wasmsimd-x86-acc2.c
@@ -0,0 +1,212 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    const float* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const float*) ((uintptr_t) i4 + input_offset);
+    }
+    const float* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const float*) ((uintptr_t) i5 + input_offset);
+    }
+    const float* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const float*) ((uintptr_t) i6 + input_offset);
+    }
+    const float* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const float*) ((uintptr_t) i7 + input_offset);
+    }
+    const float* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const float*) ((uintptr_t) i8 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      i4 += 4;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 20);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      i5 += 4;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 24);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      i6 += 4;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      i7 += 4;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      i8 += 4;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 36);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      w += 40;
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 4);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 8);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 16);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vk4x0123 = wasm_v128_load(w + 20);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vk5x0123 = wasm_v128_load(w + 24);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vk6x0123 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vk7x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vk8x0123 = wasm_v128_load(w + 36);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-arm.c b/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-arm.c
deleted file mode 100644
index d5eb7ae..0000000
--- a/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-arm.c
+++ /dev/null
@@ -1,696 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    const float* i4 = input[4];
-    assert(i4 != NULL);
-    if XNN_UNPREDICTABLE(i4 != zero) {
-      i4 = (const float*) ((uintptr_t) i4 + input_offset);
-    }
-    const float* i5 = input[5];
-    assert(i5 != NULL);
-    if XNN_UNPREDICTABLE(i5 != zero) {
-      i5 = (const float*) ((uintptr_t) i5 + input_offset);
-    }
-    const float* i6 = input[6];
-    assert(i6 != NULL);
-    if XNN_UNPREDICTABLE(i6 != zero) {
-      i6 = (const float*) ((uintptr_t) i6 + input_offset);
-    }
-    const float* i7 = input[7];
-    assert(i7 != NULL);
-    if XNN_UNPREDICTABLE(i7 != zero) {
-      i7 = (const float*) ((uintptr_t) i7 + input_offset);
-    }
-    const float* i8 = input[8];
-    assert(i8 != NULL);
-    if XNN_UNPREDICTABLE(i8 != zero) {
-      i8 = (const float*) ((uintptr_t) i8 + input_offset);
-    }
-    const float* i9 = input[9];
-    assert(i9 != NULL);
-    if XNN_UNPREDICTABLE(i9 != zero) {
-      i9 = (const float*) ((uintptr_t) i9 + input_offset);
-    }
-    const float* i10 = input[10];
-    assert(i10 != NULL);
-    if XNN_UNPREDICTABLE(i10 != zero) {
-      i10 = (const float*) ((uintptr_t) i10 + input_offset);
-    }
-    const float* i11 = input[11];
-    assert(i11 != NULL);
-    if XNN_UNPREDICTABLE(i11 != zero) {
-      i11 = (const float*) ((uintptr_t) i11 + input_offset);
-    }
-    const float* i12 = input[12];
-    assert(i12 != NULL);
-    if XNN_UNPREDICTABLE(i12 != zero) {
-      i12 = (const float*) ((uintptr_t) i12 + input_offset);
-    }
-    const float* i13 = input[13];
-    assert(i13 != NULL);
-    if XNN_UNPREDICTABLE(i13 != zero) {
-      i13 = (const float*) ((uintptr_t) i13 + input_offset);
-    }
-    const float* i14 = input[14];
-    assert(i14 != NULL);
-    if XNN_UNPREDICTABLE(i14 != zero) {
-      i14 = (const float*) ((uintptr_t) i14 + input_offset);
-    }
-    const float* i15 = input[15];
-    assert(i15 != NULL);
-    if XNN_UNPREDICTABLE(i15 != zero) {
-      i15 = (const float*) ((uintptr_t) i15 + input_offset);
-    }
-    const float* i16 = input[16];
-    assert(i16 != NULL);
-    if XNN_UNPREDICTABLE(i16 != zero) {
-      i16 = (const float*) ((uintptr_t) i16 + input_offset);
-    }
-    const float* i17 = input[17];
-    assert(i17 != NULL);
-    if XNN_UNPREDICTABLE(i17 != zero) {
-      i17 = (const float*) ((uintptr_t) i17 + input_offset);
-    }
-    const float* i18 = input[18];
-    assert(i18 != NULL);
-    if XNN_UNPREDICTABLE(i18 != zero) {
-      i18 = (const float*) ((uintptr_t) i18 + input_offset);
-    }
-    const float* i19 = input[19];
-    assert(i19 != NULL);
-    if XNN_UNPREDICTABLE(i19 != zero) {
-      i19 = (const float*) ((uintptr_t) i19 + input_offset);
-    }
-    const float* i20 = input[20];
-    assert(i20 != NULL);
-    if XNN_UNPREDICTABLE(i20 != zero) {
-      i20 = (const float*) ((uintptr_t) i20 + input_offset);
-    }
-    const float* i21 = input[21];
-    assert(i21 != NULL);
-    if XNN_UNPREDICTABLE(i21 != zero) {
-      i21 = (const float*) ((uintptr_t) i21 + input_offset);
-    }
-    const float* i22 = input[22];
-    assert(i22 != NULL);
-    if XNN_UNPREDICTABLE(i22 != zero) {
-      i22 = (const float*) ((uintptr_t) i22 + input_offset);
-    }
-    const float* i23 = input[23];
-    assert(i23 != NULL);
-    if XNN_UNPREDICTABLE(i23 != zero) {
-      i23 = (const float*) ((uintptr_t) i23 + input_offset);
-    }
-    const float* i24 = input[24];
-    assert(i24 != NULL);
-    if XNN_UNPREDICTABLE(i24 != zero) {
-      i24 = (const float*) ((uintptr_t) i24 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 8; c -= 8) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-      v128_t vacc4567p0 = wasm_v128_load(w + 4);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
-      i0 += 8;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      const v128_t vk0x4567 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
-      i1 += 8;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      const v128_t vk1x4567 = wasm_v128_load(w + 20);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      const v128_t vk2x4567 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
-      i3 += 8;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      const v128_t vk3x4567 = wasm_v128_load(w + 36);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
-      i4 += 8;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      const v128_t vk4x4567 = wasm_v128_load(w + 44);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi4x4567, vk4x4567));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
-      i5 += 8;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      const v128_t vk5x4567 = wasm_v128_load(w + 52);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi5x4567, vk5x4567));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
-      i6 += 8;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      const v128_t vk6x4567 = wasm_v128_load(w + 60);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi6x4567, vk6x4567));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
-      i7 += 8;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      const v128_t vk7x4567 = wasm_v128_load(w + 68);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi7x4567, vk7x4567));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
-      i8 += 8;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      const v128_t vk8x4567 = wasm_v128_load(w + 76);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi8x4567, vk8x4567));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      const v128_t vi9x4567 = wasm_v128_load(i9 + 4);
-      i9 += 8;
-
-      const v128_t vk9x0123 = wasm_v128_load(w + 80);
-      const v128_t vk9x4567 = wasm_v128_load(w + 84);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi9x4567, vk9x4567));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      const v128_t vi10x4567 = wasm_v128_load(i10 + 4);
-      i10 += 8;
-
-      const v128_t vk10x0123 = wasm_v128_load(w + 88);
-      const v128_t vk10x4567 = wasm_v128_load(w + 92);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi10x4567, vk10x4567));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      const v128_t vi11x4567 = wasm_v128_load(i11 + 4);
-      i11 += 8;
-
-      const v128_t vk11x0123 = wasm_v128_load(w + 96);
-      const v128_t vk11x4567 = wasm_v128_load(w + 100);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi11x4567, vk11x4567));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      const v128_t vi12x4567 = wasm_v128_load(i12 + 4);
-      i12 += 8;
-
-      const v128_t vk12x0123 = wasm_v128_load(w + 104);
-      const v128_t vk12x4567 = wasm_v128_load(w + 108);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi12x4567, vk12x4567));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      const v128_t vi13x4567 = wasm_v128_load(i13 + 4);
-      i13 += 8;
-
-      const v128_t vk13x0123 = wasm_v128_load(w + 112);
-      const v128_t vk13x4567 = wasm_v128_load(w + 116);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi13x4567, vk13x4567));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      const v128_t vi14x4567 = wasm_v128_load(i14 + 4);
-      i14 += 8;
-
-      const v128_t vk14x0123 = wasm_v128_load(w + 120);
-      const v128_t vk14x4567 = wasm_v128_load(w + 124);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi14x4567, vk14x4567));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      const v128_t vi15x4567 = wasm_v128_load(i15 + 4);
-      i15 += 8;
-
-      const v128_t vk15x0123 = wasm_v128_load(w + 128);
-      const v128_t vk15x4567 = wasm_v128_load(w + 132);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi15x4567, vk15x4567));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      const v128_t vi16x4567 = wasm_v128_load(i16 + 4);
-      i16 += 8;
-
-      const v128_t vk16x0123 = wasm_v128_load(w + 136);
-      const v128_t vk16x4567 = wasm_v128_load(w + 140);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi16x4567, vk16x4567));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      const v128_t vi17x4567 = wasm_v128_load(i17 + 4);
-      i17 += 8;
-
-      const v128_t vk17x0123 = wasm_v128_load(w + 144);
-      const v128_t vk17x4567 = wasm_v128_load(w + 148);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi17x4567, vk17x4567));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      const v128_t vi18x4567 = wasm_v128_load(i18 + 4);
-      i18 += 8;
-
-      const v128_t vk18x0123 = wasm_v128_load(w + 152);
-      const v128_t vk18x4567 = wasm_v128_load(w + 156);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi18x4567, vk18x4567));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      const v128_t vi19x4567 = wasm_v128_load(i19 + 4);
-      i19 += 8;
-
-      const v128_t vk19x0123 = wasm_v128_load(w + 160);
-      const v128_t vk19x4567 = wasm_v128_load(w + 164);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi19x4567, vk19x4567));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      const v128_t vi20x4567 = wasm_v128_load(i20 + 4);
-      i20 += 8;
-
-      const v128_t vk20x0123 = wasm_v128_load(w + 168);
-      const v128_t vk20x4567 = wasm_v128_load(w + 172);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi20x4567, vk20x4567));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      const v128_t vi21x4567 = wasm_v128_load(i21 + 4);
-      i21 += 8;
-
-      const v128_t vk21x0123 = wasm_v128_load(w + 176);
-      const v128_t vk21x4567 = wasm_v128_load(w + 180);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi21x4567, vk21x4567));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      const v128_t vi22x4567 = wasm_v128_load(i22 + 4);
-      i22 += 8;
-
-      const v128_t vk22x0123 = wasm_v128_load(w + 184);
-      const v128_t vk22x4567 = wasm_v128_load(w + 188);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi22x4567, vk22x4567));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      const v128_t vi23x4567 = wasm_v128_load(i23 + 4);
-      i23 += 8;
-
-      const v128_t vk23x0123 = wasm_v128_load(w + 192);
-      const v128_t vk23x4567 = wasm_v128_load(w + 196);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi23x4567, vk23x4567));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      const v128_t vi24x4567 = wasm_v128_load(i24 + 4);
-      i24 += 8;
-
-      const v128_t vk24x0123 = wasm_v128_load(w + 200);
-      const v128_t vk24x4567 = wasm_v128_load(w + 204);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi24x4567, vk24x4567));
-
-      w += 208;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
-
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-      vacc4567 = wasm_f32x4_min(vacc4567, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      wasm_v128_store(output + 4, vacc4567);
-      output += 8;
-    }
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      i4 += 4;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      i5 += 4;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      i6 += 4;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      i7 += 4;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      i8 += 4;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      i9 += 4;
-
-      const v128_t vk9x0123 = wasm_v128_load(w + 80);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      i10 += 4;
-
-      const v128_t vk10x0123 = wasm_v128_load(w + 88);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      i11 += 4;
-
-      const v128_t vk11x0123 = wasm_v128_load(w + 96);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      i12 += 4;
-
-      const v128_t vk12x0123 = wasm_v128_load(w + 104);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      i13 += 4;
-
-      const v128_t vk13x0123 = wasm_v128_load(w + 112);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      i14 += 4;
-
-      const v128_t vk14x0123 = wasm_v128_load(w + 120);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      i15 += 4;
-
-      const v128_t vk15x0123 = wasm_v128_load(w + 128);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      i16 += 4;
-
-      const v128_t vk16x0123 = wasm_v128_load(w + 136);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      i17 += 4;
-
-      const v128_t vk17x0123 = wasm_v128_load(w + 144);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      i18 += 4;
-
-      const v128_t vk18x0123 = wasm_v128_load(w + 152);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      i19 += 4;
-
-      const v128_t vk19x0123 = wasm_v128_load(w + 160);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      i20 += 4;
-
-      const v128_t vk20x0123 = wasm_v128_load(w + 168);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      i21 += 4;
-
-      const v128_t vk21x0123 = wasm_v128_load(w + 176);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      i22 += 4;
-
-      const v128_t vk22x0123 = wasm_v128_load(w + 184);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      i23 += 4;
-
-      const v128_t vk23x0123 = wasm_v128_load(w + 192);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      i24 += 4;
-
-      const v128_t vk24x0123 = wasm_v128_load(w + 200);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-
-      w += 4;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      const v128_t vk9x0123 = wasm_v128_load(w + 80);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      const v128_t vk10x0123 = wasm_v128_load(w + 88);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      const v128_t vk11x0123 = wasm_v128_load(w + 96);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      const v128_t vk12x0123 = wasm_v128_load(w + 104);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      const v128_t vk13x0123 = wasm_v128_load(w + 112);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      const v128_t vk14x0123 = wasm_v128_load(w + 120);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      const v128_t vk15x0123 = wasm_v128_load(w + 128);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      const v128_t vk16x0123 = wasm_v128_load(w + 136);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      const v128_t vk17x0123 = wasm_v128_load(w + 144);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      const v128_t vk18x0123 = wasm_v128_load(w + 152);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      const v128_t vk19x0123 = wasm_v128_load(w + 160);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      const v128_t vk20x0123 = wasm_v128_load(w + 168);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      const v128_t vk21x0123 = wasm_v128_load(w + 176);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      const v128_t vk22x0123 = wasm_v128_load(w + 184);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      const v128_t vk23x0123 = wasm_v128_load(w + 192);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      const v128_t vk24x0123 = wasm_v128_load(w + 200);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-x86.c b/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-x86.c
deleted file mode 100644
index 8151aa7..0000000
--- a/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-acc2-x86.c
+++ /dev/null
@@ -1,696 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    const float* i4 = input[4];
-    assert(i4 != NULL);
-    if XNN_UNPREDICTABLE(i4 != zero) {
-      i4 = (const float*) ((uintptr_t) i4 + input_offset);
-    }
-    const float* i5 = input[5];
-    assert(i5 != NULL);
-    if XNN_UNPREDICTABLE(i5 != zero) {
-      i5 = (const float*) ((uintptr_t) i5 + input_offset);
-    }
-    const float* i6 = input[6];
-    assert(i6 != NULL);
-    if XNN_UNPREDICTABLE(i6 != zero) {
-      i6 = (const float*) ((uintptr_t) i6 + input_offset);
-    }
-    const float* i7 = input[7];
-    assert(i7 != NULL);
-    if XNN_UNPREDICTABLE(i7 != zero) {
-      i7 = (const float*) ((uintptr_t) i7 + input_offset);
-    }
-    const float* i8 = input[8];
-    assert(i8 != NULL);
-    if XNN_UNPREDICTABLE(i8 != zero) {
-      i8 = (const float*) ((uintptr_t) i8 + input_offset);
-    }
-    const float* i9 = input[9];
-    assert(i9 != NULL);
-    if XNN_UNPREDICTABLE(i9 != zero) {
-      i9 = (const float*) ((uintptr_t) i9 + input_offset);
-    }
-    const float* i10 = input[10];
-    assert(i10 != NULL);
-    if XNN_UNPREDICTABLE(i10 != zero) {
-      i10 = (const float*) ((uintptr_t) i10 + input_offset);
-    }
-    const float* i11 = input[11];
-    assert(i11 != NULL);
-    if XNN_UNPREDICTABLE(i11 != zero) {
-      i11 = (const float*) ((uintptr_t) i11 + input_offset);
-    }
-    const float* i12 = input[12];
-    assert(i12 != NULL);
-    if XNN_UNPREDICTABLE(i12 != zero) {
-      i12 = (const float*) ((uintptr_t) i12 + input_offset);
-    }
-    const float* i13 = input[13];
-    assert(i13 != NULL);
-    if XNN_UNPREDICTABLE(i13 != zero) {
-      i13 = (const float*) ((uintptr_t) i13 + input_offset);
-    }
-    const float* i14 = input[14];
-    assert(i14 != NULL);
-    if XNN_UNPREDICTABLE(i14 != zero) {
-      i14 = (const float*) ((uintptr_t) i14 + input_offset);
-    }
-    const float* i15 = input[15];
-    assert(i15 != NULL);
-    if XNN_UNPREDICTABLE(i15 != zero) {
-      i15 = (const float*) ((uintptr_t) i15 + input_offset);
-    }
-    const float* i16 = input[16];
-    assert(i16 != NULL);
-    if XNN_UNPREDICTABLE(i16 != zero) {
-      i16 = (const float*) ((uintptr_t) i16 + input_offset);
-    }
-    const float* i17 = input[17];
-    assert(i17 != NULL);
-    if XNN_UNPREDICTABLE(i17 != zero) {
-      i17 = (const float*) ((uintptr_t) i17 + input_offset);
-    }
-    const float* i18 = input[18];
-    assert(i18 != NULL);
-    if XNN_UNPREDICTABLE(i18 != zero) {
-      i18 = (const float*) ((uintptr_t) i18 + input_offset);
-    }
-    const float* i19 = input[19];
-    assert(i19 != NULL);
-    if XNN_UNPREDICTABLE(i19 != zero) {
-      i19 = (const float*) ((uintptr_t) i19 + input_offset);
-    }
-    const float* i20 = input[20];
-    assert(i20 != NULL);
-    if XNN_UNPREDICTABLE(i20 != zero) {
-      i20 = (const float*) ((uintptr_t) i20 + input_offset);
-    }
-    const float* i21 = input[21];
-    assert(i21 != NULL);
-    if XNN_UNPREDICTABLE(i21 != zero) {
-      i21 = (const float*) ((uintptr_t) i21 + input_offset);
-    }
-    const float* i22 = input[22];
-    assert(i22 != NULL);
-    if XNN_UNPREDICTABLE(i22 != zero) {
-      i22 = (const float*) ((uintptr_t) i22 + input_offset);
-    }
-    const float* i23 = input[23];
-    assert(i23 != NULL);
-    if XNN_UNPREDICTABLE(i23 != zero) {
-      i23 = (const float*) ((uintptr_t) i23 + input_offset);
-    }
-    const float* i24 = input[24];
-    assert(i24 != NULL);
-    if XNN_UNPREDICTABLE(i24 != zero) {
-      i24 = (const float*) ((uintptr_t) i24 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 8; c -= 8) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-      v128_t vacc4567p0 = wasm_v128_load(w + 4);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
-      i0 += 8;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      const v128_t vk0x4567 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
-      i1 += 8;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      const v128_t vk1x4567 = wasm_v128_load(w + 20);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      const v128_t vk2x4567 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
-      i3 += 8;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      const v128_t vk3x4567 = wasm_v128_load(w + 36);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
-      i4 += 8;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      const v128_t vk4x4567 = wasm_v128_load(w + 44);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi4x4567, vk4x4567));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
-      i5 += 8;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      const v128_t vk5x4567 = wasm_v128_load(w + 52);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi5x4567, vk5x4567));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
-      i6 += 8;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      const v128_t vk6x4567 = wasm_v128_load(w + 60);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi6x4567, vk6x4567));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
-      i7 += 8;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      const v128_t vk7x4567 = wasm_v128_load(w + 68);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi7x4567, vk7x4567));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
-      i8 += 8;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      const v128_t vk8x4567 = wasm_v128_load(w + 76);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi8x4567, vk8x4567));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      const v128_t vi9x4567 = wasm_v128_load(i9 + 4);
-      i9 += 8;
-
-      const v128_t vk9x0123 = wasm_v128_load(w + 80);
-      const v128_t vk9x4567 = wasm_v128_load(w + 84);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi9x4567, vk9x4567));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      const v128_t vi10x4567 = wasm_v128_load(i10 + 4);
-      i10 += 8;
-
-      const v128_t vk10x0123 = wasm_v128_load(w + 88);
-      const v128_t vk10x4567 = wasm_v128_load(w + 92);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi10x4567, vk10x4567));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      const v128_t vi11x4567 = wasm_v128_load(i11 + 4);
-      i11 += 8;
-
-      const v128_t vk11x0123 = wasm_v128_load(w + 96);
-      const v128_t vk11x4567 = wasm_v128_load(w + 100);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi11x4567, vk11x4567));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      const v128_t vi12x4567 = wasm_v128_load(i12 + 4);
-      i12 += 8;
-
-      const v128_t vk12x0123 = wasm_v128_load(w + 104);
-      const v128_t vk12x4567 = wasm_v128_load(w + 108);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi12x4567, vk12x4567));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      const v128_t vi13x4567 = wasm_v128_load(i13 + 4);
-      i13 += 8;
-
-      const v128_t vk13x0123 = wasm_v128_load(w + 112);
-      const v128_t vk13x4567 = wasm_v128_load(w + 116);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi13x4567, vk13x4567));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      const v128_t vi14x4567 = wasm_v128_load(i14 + 4);
-      i14 += 8;
-
-      const v128_t vk14x0123 = wasm_v128_load(w + 120);
-      const v128_t vk14x4567 = wasm_v128_load(w + 124);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi14x4567, vk14x4567));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      const v128_t vi15x4567 = wasm_v128_load(i15 + 4);
-      i15 += 8;
-
-      const v128_t vk15x0123 = wasm_v128_load(w + 128);
-      const v128_t vk15x4567 = wasm_v128_load(w + 132);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi15x4567, vk15x4567));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      const v128_t vi16x4567 = wasm_v128_load(i16 + 4);
-      i16 += 8;
-
-      const v128_t vk16x0123 = wasm_v128_load(w + 136);
-      const v128_t vk16x4567 = wasm_v128_load(w + 140);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi16x4567, vk16x4567));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      const v128_t vi17x4567 = wasm_v128_load(i17 + 4);
-      i17 += 8;
-
-      const v128_t vk17x0123 = wasm_v128_load(w + 144);
-      const v128_t vk17x4567 = wasm_v128_load(w + 148);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi17x4567, vk17x4567));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      const v128_t vi18x4567 = wasm_v128_load(i18 + 4);
-      i18 += 8;
-
-      const v128_t vk18x0123 = wasm_v128_load(w + 152);
-      const v128_t vk18x4567 = wasm_v128_load(w + 156);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi18x4567, vk18x4567));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      const v128_t vi19x4567 = wasm_v128_load(i19 + 4);
-      i19 += 8;
-
-      const v128_t vk19x0123 = wasm_v128_load(w + 160);
-      const v128_t vk19x4567 = wasm_v128_load(w + 164);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi19x4567, vk19x4567));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      const v128_t vi20x4567 = wasm_v128_load(i20 + 4);
-      i20 += 8;
-
-      const v128_t vk20x0123 = wasm_v128_load(w + 168);
-      const v128_t vk20x4567 = wasm_v128_load(w + 172);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi20x4567, vk20x4567));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      const v128_t vi21x4567 = wasm_v128_load(i21 + 4);
-      i21 += 8;
-
-      const v128_t vk21x0123 = wasm_v128_load(w + 176);
-      const v128_t vk21x4567 = wasm_v128_load(w + 180);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi21x4567, vk21x4567));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      const v128_t vi22x4567 = wasm_v128_load(i22 + 4);
-      i22 += 8;
-
-      const v128_t vk22x0123 = wasm_v128_load(w + 184);
-      const v128_t vk22x4567 = wasm_v128_load(w + 188);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi22x4567, vk22x4567));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      const v128_t vi23x4567 = wasm_v128_load(i23 + 4);
-      i23 += 8;
-
-      const v128_t vk23x0123 = wasm_v128_load(w + 192);
-      const v128_t vk23x4567 = wasm_v128_load(w + 196);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi23x4567, vk23x4567));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      const v128_t vi24x4567 = wasm_v128_load(i24 + 4);
-      i24 += 8;
-
-      const v128_t vk24x0123 = wasm_v128_load(w + 200);
-      const v128_t vk24x4567 = wasm_v128_load(w + 204);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi24x4567, vk24x4567));
-
-      w += 208;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      v128_t vacc4567 = wasm_v128_bitselect(vmin, vacc4567p0, wasm_f32x4_lt(vacc4567p0, vmin));
-
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-      vacc4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      wasm_v128_store(output + 4, vacc4567);
-      output += 8;
-    }
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      i4 += 4;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      i5 += 4;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      i6 += 4;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      i7 += 4;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      i8 += 4;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      i9 += 4;
-
-      const v128_t vk9x0123 = wasm_v128_load(w + 80);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      i10 += 4;
-
-      const v128_t vk10x0123 = wasm_v128_load(w + 88);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      i11 += 4;
-
-      const v128_t vk11x0123 = wasm_v128_load(w + 96);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      i12 += 4;
-
-      const v128_t vk12x0123 = wasm_v128_load(w + 104);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      i13 += 4;
-
-      const v128_t vk13x0123 = wasm_v128_load(w + 112);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      i14 += 4;
-
-      const v128_t vk14x0123 = wasm_v128_load(w + 120);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      i15 += 4;
-
-      const v128_t vk15x0123 = wasm_v128_load(w + 128);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      i16 += 4;
-
-      const v128_t vk16x0123 = wasm_v128_load(w + 136);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      i17 += 4;
-
-      const v128_t vk17x0123 = wasm_v128_load(w + 144);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      i18 += 4;
-
-      const v128_t vk18x0123 = wasm_v128_load(w + 152);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      i19 += 4;
-
-      const v128_t vk19x0123 = wasm_v128_load(w + 160);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      i20 += 4;
-
-      const v128_t vk20x0123 = wasm_v128_load(w + 168);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      i21 += 4;
-
-      const v128_t vk21x0123 = wasm_v128_load(w + 176);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      i22 += 4;
-
-      const v128_t vk22x0123 = wasm_v128_load(w + 184);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      i23 += 4;
-
-      const v128_t vk23x0123 = wasm_v128_load(w + 192);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      i24 += 4;
-
-      const v128_t vk24x0123 = wasm_v128_load(w + 200);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-
-      w += 4;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      const v128_t vi9x0123 = wasm_v128_load(i9);
-      const v128_t vk9x0123 = wasm_v128_load(w + 80);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
-
-      const v128_t vi10x0123 = wasm_v128_load(i10);
-      const v128_t vk10x0123 = wasm_v128_load(w + 88);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
-
-      const v128_t vi11x0123 = wasm_v128_load(i11);
-      const v128_t vk11x0123 = wasm_v128_load(w + 96);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
-
-      const v128_t vi12x0123 = wasm_v128_load(i12);
-      const v128_t vk12x0123 = wasm_v128_load(w + 104);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
-
-      const v128_t vi13x0123 = wasm_v128_load(i13);
-      const v128_t vk13x0123 = wasm_v128_load(w + 112);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
-
-      const v128_t vi14x0123 = wasm_v128_load(i14);
-      const v128_t vk14x0123 = wasm_v128_load(w + 120);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
-
-      const v128_t vi15x0123 = wasm_v128_load(i15);
-      const v128_t vk15x0123 = wasm_v128_load(w + 128);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
-
-      const v128_t vi16x0123 = wasm_v128_load(i16);
-      const v128_t vk16x0123 = wasm_v128_load(w + 136);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
-
-      const v128_t vi17x0123 = wasm_v128_load(i17);
-      const v128_t vk17x0123 = wasm_v128_load(w + 144);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
-
-      const v128_t vi18x0123 = wasm_v128_load(i18);
-      const v128_t vk18x0123 = wasm_v128_load(w + 152);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
-
-      const v128_t vi19x0123 = wasm_v128_load(i19);
-      const v128_t vk19x0123 = wasm_v128_load(w + 160);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
-
-      const v128_t vi20x0123 = wasm_v128_load(i20);
-      const v128_t vk20x0123 = wasm_v128_load(w + 168);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
-
-      const v128_t vi21x0123 = wasm_v128_load(i21);
-      const v128_t vk21x0123 = wasm_v128_load(w + 176);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
-
-      const v128_t vi22x0123 = wasm_v128_load(i22);
-      const v128_t vk22x0123 = wasm_v128_load(w + 184);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
-
-      const v128_t vi23x0123 = wasm_v128_load(i23);
-      const v128_t vk23x0123 = wasm_v128_load(w + 192);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
-
-      const v128_t vi24x0123 = wasm_v128_load(i24);
-      const v128_t vk24x0123 = wasm_v128_load(w + 200);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-arm-acc2.c b/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-arm-acc2.c
new file mode 100644
index 0000000..249f661
--- /dev/null
+++ b/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-arm-acc2.c
@@ -0,0 +1,696 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    const float* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const float*) ((uintptr_t) i4 + input_offset);
+    }
+    const float* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const float*) ((uintptr_t) i5 + input_offset);
+    }
+    const float* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const float*) ((uintptr_t) i6 + input_offset);
+    }
+    const float* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const float*) ((uintptr_t) i7 + input_offset);
+    }
+    const float* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const float*) ((uintptr_t) i8 + input_offset);
+    }
+    const float* i9 = input[9];
+    assert(i9 != NULL);
+    if XNN_UNPREDICTABLE(i9 != zero) {
+      i9 = (const float*) ((uintptr_t) i9 + input_offset);
+    }
+    const float* i10 = input[10];
+    assert(i10 != NULL);
+    if XNN_UNPREDICTABLE(i10 != zero) {
+      i10 = (const float*) ((uintptr_t) i10 + input_offset);
+    }
+    const float* i11 = input[11];
+    assert(i11 != NULL);
+    if XNN_UNPREDICTABLE(i11 != zero) {
+      i11 = (const float*) ((uintptr_t) i11 + input_offset);
+    }
+    const float* i12 = input[12];
+    assert(i12 != NULL);
+    if XNN_UNPREDICTABLE(i12 != zero) {
+      i12 = (const float*) ((uintptr_t) i12 + input_offset);
+    }
+    const float* i13 = input[13];
+    assert(i13 != NULL);
+    if XNN_UNPREDICTABLE(i13 != zero) {
+      i13 = (const float*) ((uintptr_t) i13 + input_offset);
+    }
+    const float* i14 = input[14];
+    assert(i14 != NULL);
+    if XNN_UNPREDICTABLE(i14 != zero) {
+      i14 = (const float*) ((uintptr_t) i14 + input_offset);
+    }
+    const float* i15 = input[15];
+    assert(i15 != NULL);
+    if XNN_UNPREDICTABLE(i15 != zero) {
+      i15 = (const float*) ((uintptr_t) i15 + input_offset);
+    }
+    const float* i16 = input[16];
+    assert(i16 != NULL);
+    if XNN_UNPREDICTABLE(i16 != zero) {
+      i16 = (const float*) ((uintptr_t) i16 + input_offset);
+    }
+    const float* i17 = input[17];
+    assert(i17 != NULL);
+    if XNN_UNPREDICTABLE(i17 != zero) {
+      i17 = (const float*) ((uintptr_t) i17 + input_offset);
+    }
+    const float* i18 = input[18];
+    assert(i18 != NULL);
+    if XNN_UNPREDICTABLE(i18 != zero) {
+      i18 = (const float*) ((uintptr_t) i18 + input_offset);
+    }
+    const float* i19 = input[19];
+    assert(i19 != NULL);
+    if XNN_UNPREDICTABLE(i19 != zero) {
+      i19 = (const float*) ((uintptr_t) i19 + input_offset);
+    }
+    const float* i20 = input[20];
+    assert(i20 != NULL);
+    if XNN_UNPREDICTABLE(i20 != zero) {
+      i20 = (const float*) ((uintptr_t) i20 + input_offset);
+    }
+    const float* i21 = input[21];
+    assert(i21 != NULL);
+    if XNN_UNPREDICTABLE(i21 != zero) {
+      i21 = (const float*) ((uintptr_t) i21 + input_offset);
+    }
+    const float* i22 = input[22];
+    assert(i22 != NULL);
+    if XNN_UNPREDICTABLE(i22 != zero) {
+      i22 = (const float*) ((uintptr_t) i22 + input_offset);
+    }
+    const float* i23 = input[23];
+    assert(i23 != NULL);
+    if XNN_UNPREDICTABLE(i23 != zero) {
+      i23 = (const float*) ((uintptr_t) i23 + input_offset);
+    }
+    const float* i24 = input[24];
+    assert(i24 != NULL);
+    if XNN_UNPREDICTABLE(i24 != zero) {
+      i24 = (const float*) ((uintptr_t) i24 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 8; c -= 8) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+      v128_t vacc4567p0 = wasm_v128_load(w + 4);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
+      i0 += 8;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      const v128_t vk0x4567 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
+      i1 += 8;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      const v128_t vk1x4567 = wasm_v128_load(w + 20);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      const v128_t vk2x4567 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
+      i3 += 8;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      const v128_t vk3x4567 = wasm_v128_load(w + 36);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      const v128_t vk4x4567 = wasm_v128_load(w + 44);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi4x4567, vk4x4567));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
+      i5 += 8;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      const v128_t vk5x4567 = wasm_v128_load(w + 52);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi5x4567, vk5x4567));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      const v128_t vk6x4567 = wasm_v128_load(w + 60);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi6x4567, vk6x4567));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
+      i7 += 8;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      const v128_t vk7x4567 = wasm_v128_load(w + 68);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi7x4567, vk7x4567));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      const v128_t vk8x4567 = wasm_v128_load(w + 76);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi8x4567, vk8x4567));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      const v128_t vi9x4567 = wasm_v128_load(i9 + 4);
+      i9 += 8;
+
+      const v128_t vk9x0123 = wasm_v128_load(w + 80);
+      const v128_t vk9x4567 = wasm_v128_load(w + 84);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi9x4567, vk9x4567));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      const v128_t vi10x4567 = wasm_v128_load(i10 + 4);
+      i10 += 8;
+
+      const v128_t vk10x0123 = wasm_v128_load(w + 88);
+      const v128_t vk10x4567 = wasm_v128_load(w + 92);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi10x4567, vk10x4567));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      const v128_t vi11x4567 = wasm_v128_load(i11 + 4);
+      i11 += 8;
+
+      const v128_t vk11x0123 = wasm_v128_load(w + 96);
+      const v128_t vk11x4567 = wasm_v128_load(w + 100);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi11x4567, vk11x4567));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      const v128_t vi12x4567 = wasm_v128_load(i12 + 4);
+      i12 += 8;
+
+      const v128_t vk12x0123 = wasm_v128_load(w + 104);
+      const v128_t vk12x4567 = wasm_v128_load(w + 108);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi12x4567, vk12x4567));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      const v128_t vi13x4567 = wasm_v128_load(i13 + 4);
+      i13 += 8;
+
+      const v128_t vk13x0123 = wasm_v128_load(w + 112);
+      const v128_t vk13x4567 = wasm_v128_load(w + 116);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi13x4567, vk13x4567));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      const v128_t vi14x4567 = wasm_v128_load(i14 + 4);
+      i14 += 8;
+
+      const v128_t vk14x0123 = wasm_v128_load(w + 120);
+      const v128_t vk14x4567 = wasm_v128_load(w + 124);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi14x4567, vk14x4567));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      const v128_t vi15x4567 = wasm_v128_load(i15 + 4);
+      i15 += 8;
+
+      const v128_t vk15x0123 = wasm_v128_load(w + 128);
+      const v128_t vk15x4567 = wasm_v128_load(w + 132);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi15x4567, vk15x4567));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      const v128_t vi16x4567 = wasm_v128_load(i16 + 4);
+      i16 += 8;
+
+      const v128_t vk16x0123 = wasm_v128_load(w + 136);
+      const v128_t vk16x4567 = wasm_v128_load(w + 140);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi16x4567, vk16x4567));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      const v128_t vi17x4567 = wasm_v128_load(i17 + 4);
+      i17 += 8;
+
+      const v128_t vk17x0123 = wasm_v128_load(w + 144);
+      const v128_t vk17x4567 = wasm_v128_load(w + 148);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi17x4567, vk17x4567));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      const v128_t vi18x4567 = wasm_v128_load(i18 + 4);
+      i18 += 8;
+
+      const v128_t vk18x0123 = wasm_v128_load(w + 152);
+      const v128_t vk18x4567 = wasm_v128_load(w + 156);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi18x4567, vk18x4567));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      const v128_t vi19x4567 = wasm_v128_load(i19 + 4);
+      i19 += 8;
+
+      const v128_t vk19x0123 = wasm_v128_load(w + 160);
+      const v128_t vk19x4567 = wasm_v128_load(w + 164);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi19x4567, vk19x4567));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      const v128_t vi20x4567 = wasm_v128_load(i20 + 4);
+      i20 += 8;
+
+      const v128_t vk20x0123 = wasm_v128_load(w + 168);
+      const v128_t vk20x4567 = wasm_v128_load(w + 172);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi20x4567, vk20x4567));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      const v128_t vi21x4567 = wasm_v128_load(i21 + 4);
+      i21 += 8;
+
+      const v128_t vk21x0123 = wasm_v128_load(w + 176);
+      const v128_t vk21x4567 = wasm_v128_load(w + 180);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi21x4567, vk21x4567));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      const v128_t vi22x4567 = wasm_v128_load(i22 + 4);
+      i22 += 8;
+
+      const v128_t vk22x0123 = wasm_v128_load(w + 184);
+      const v128_t vk22x4567 = wasm_v128_load(w + 188);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi22x4567, vk22x4567));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      const v128_t vi23x4567 = wasm_v128_load(i23 + 4);
+      i23 += 8;
+
+      const v128_t vk23x0123 = wasm_v128_load(w + 192);
+      const v128_t vk23x4567 = wasm_v128_load(w + 196);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi23x4567, vk23x4567));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      const v128_t vi24x4567 = wasm_v128_load(i24 + 4);
+      i24 += 8;
+
+      const v128_t vk24x0123 = wasm_v128_load(w + 200);
+      const v128_t vk24x4567 = wasm_v128_load(w + 204);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi24x4567, vk24x4567));
+
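+      // 8 bias floats + 25 taps x 8 channels: 208 weights consumed per 8-channel group.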
+      w += 208;
+
+      // Add up both accumulator chains into vacc0123p0 and vacc4567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
+
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+      vacc4567 = wasm_f32x4_min(vacc4567, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      wasm_v128_store(output + 4, vacc4567);
+      output += 8;
+    }
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      i4 += 4;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      i5 += 4;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      i6 += 4;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      i7 += 4;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      i8 += 4;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      i9 += 4;
+
+      const v128_t vk9x0123 = wasm_v128_load(w + 80);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      i10 += 4;
+
+      const v128_t vk10x0123 = wasm_v128_load(w + 88);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      i11 += 4;
+
+      const v128_t vk11x0123 = wasm_v128_load(w + 96);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      i12 += 4;
+
+      const v128_t vk12x0123 = wasm_v128_load(w + 104);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      i13 += 4;
+
+      const v128_t vk13x0123 = wasm_v128_load(w + 112);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      i14 += 4;
+
+      const v128_t vk14x0123 = wasm_v128_load(w + 120);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      i15 += 4;
+
+      const v128_t vk15x0123 = wasm_v128_load(w + 128);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      i16 += 4;
+
+      const v128_t vk16x0123 = wasm_v128_load(w + 136);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      i17 += 4;
+
+      const v128_t vk17x0123 = wasm_v128_load(w + 144);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      i18 += 4;
+
+      const v128_t vk18x0123 = wasm_v128_load(w + 152);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      i19 += 4;
+
+      const v128_t vk19x0123 = wasm_v128_load(w + 160);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      i20 += 4;
+
+      const v128_t vk20x0123 = wasm_v128_load(w + 168);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      i21 += 4;
+
+      const v128_t vk21x0123 = wasm_v128_load(w + 176);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      i22 += 4;
+
+      const v128_t vk22x0123 = wasm_v128_load(w + 184);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      i23 += 4;
+
+      const v128_t vk23x0123 = wasm_v128_load(w + 192);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      i24 += 4;
+
+      const v128_t vk24x0123 = wasm_v128_load(w + 200);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+
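+      // Advance w by only 4: each bias/tap group spans 8 floats, so the
+      // remainder block below (if any) reads the upper half of this padded
+      // 8-channel weight group at the same offsets.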
+      w += 4;
+
+      // Add up both accumulator chains into vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      const v128_t vk9x0123 = wasm_v128_load(w + 80);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      const v128_t vk10x0123 = wasm_v128_load(w + 88);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      const v128_t vk11x0123 = wasm_v128_load(w + 96);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      const v128_t vk12x0123 = wasm_v128_load(w + 104);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      const v128_t vk13x0123 = wasm_v128_load(w + 112);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      const v128_t vk14x0123 = wasm_v128_load(w + 120);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      const v128_t vk15x0123 = wasm_v128_load(w + 128);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      const v128_t vk16x0123 = wasm_v128_load(w + 136);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      const v128_t vk17x0123 = wasm_v128_load(w + 144);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      const v128_t vk18x0123 = wasm_v128_load(w + 152);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      const v128_t vk19x0123 = wasm_v128_load(w + 160);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      const v128_t vk20x0123 = wasm_v128_load(w + 168);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      const v128_t vk21x0123 = wasm_v128_load(w + 176);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      const v128_t vk22x0123 = wasm_v128_load(w + 184);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      const v128_t vk23x0123 = wasm_v128_load(w + 192);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      const v128_t vk24x0123 = wasm_v128_load(w + 200);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+
+      // Add up both accumulator chains into vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
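+      // Store the last 1-3 results lane by lane: two lanes via a 64-bit store, then one scalar lane.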
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-x86-acc2.c b/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-x86-acc2.c
new file mode 100644
index 0000000..b446d17
--- /dev/null
+++ b/src/f32-dwconv/gen/up8x25-minmax-wasmsimd-x86-acc2.c
@@ -0,0 +1,696 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    const float* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const float*) ((uintptr_t) i4 + input_offset);
+    }
+    const float* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const float*) ((uintptr_t) i5 + input_offset);
+    }
+    const float* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const float*) ((uintptr_t) i6 + input_offset);
+    }
+    const float* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const float*) ((uintptr_t) i7 + input_offset);
+    }
+    const float* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const float*) ((uintptr_t) i8 + input_offset);
+    }
+    const float* i9 = input[9];
+    assert(i9 != NULL);
+    if XNN_UNPREDICTABLE(i9 != zero) {
+      i9 = (const float*) ((uintptr_t) i9 + input_offset);
+    }
+    const float* i10 = input[10];
+    assert(i10 != NULL);
+    if XNN_UNPREDICTABLE(i10 != zero) {
+      i10 = (const float*) ((uintptr_t) i10 + input_offset);
+    }
+    const float* i11 = input[11];
+    assert(i11 != NULL);
+    if XNN_UNPREDICTABLE(i11 != zero) {
+      i11 = (const float*) ((uintptr_t) i11 + input_offset);
+    }
+    const float* i12 = input[12];
+    assert(i12 != NULL);
+    if XNN_UNPREDICTABLE(i12 != zero) {
+      i12 = (const float*) ((uintptr_t) i12 + input_offset);
+    }
+    const float* i13 = input[13];
+    assert(i13 != NULL);
+    if XNN_UNPREDICTABLE(i13 != zero) {
+      i13 = (const float*) ((uintptr_t) i13 + input_offset);
+    }
+    const float* i14 = input[14];
+    assert(i14 != NULL);
+    if XNN_UNPREDICTABLE(i14 != zero) {
+      i14 = (const float*) ((uintptr_t) i14 + input_offset);
+    }
+    const float* i15 = input[15];
+    assert(i15 != NULL);
+    if XNN_UNPREDICTABLE(i15 != zero) {
+      i15 = (const float*) ((uintptr_t) i15 + input_offset);
+    }
+    const float* i16 = input[16];
+    assert(i16 != NULL);
+    if XNN_UNPREDICTABLE(i16 != zero) {
+      i16 = (const float*) ((uintptr_t) i16 + input_offset);
+    }
+    const float* i17 = input[17];
+    assert(i17 != NULL);
+    if XNN_UNPREDICTABLE(i17 != zero) {
+      i17 = (const float*) ((uintptr_t) i17 + input_offset);
+    }
+    const float* i18 = input[18];
+    assert(i18 != NULL);
+    if XNN_UNPREDICTABLE(i18 != zero) {
+      i18 = (const float*) ((uintptr_t) i18 + input_offset);
+    }
+    const float* i19 = input[19];
+    assert(i19 != NULL);
+    if XNN_UNPREDICTABLE(i19 != zero) {
+      i19 = (const float*) ((uintptr_t) i19 + input_offset);
+    }
+    const float* i20 = input[20];
+    assert(i20 != NULL);
+    if XNN_UNPREDICTABLE(i20 != zero) {
+      i20 = (const float*) ((uintptr_t) i20 + input_offset);
+    }
+    const float* i21 = input[21];
+    assert(i21 != NULL);
+    if XNN_UNPREDICTABLE(i21 != zero) {
+      i21 = (const float*) ((uintptr_t) i21 + input_offset);
+    }
+    const float* i22 = input[22];
+    assert(i22 != NULL);
+    if XNN_UNPREDICTABLE(i22 != zero) {
+      i22 = (const float*) ((uintptr_t) i22 + input_offset);
+    }
+    const float* i23 = input[23];
+    assert(i23 != NULL);
+    if XNN_UNPREDICTABLE(i23 != zero) {
+      i23 = (const float*) ((uintptr_t) i23 + input_offset);
+    }
+    const float* i24 = input[24];
+    assert(i24 != NULL);
+    if XNN_UNPREDICTABLE(i24 != zero) {
+      i24 = (const float*) ((uintptr_t) i24 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
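+    // Packed weight layout per 8-channel group: 8 bias floats followed by
+    // 25 taps x 8 channels = 200 filter floats (208 floats in total).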
+    for (; c >= 8; c -= 8) {
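+      // Two independent accumulator chains (p0/p1, the "acc2" in the kernel name)
+      // are built up across the 25 taps and summed once at the end.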
+      v128_t vacc0123p0 = wasm_v128_load(w);
+      v128_t vacc4567p0 = wasm_v128_load(w + 4);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
+      i0 += 8;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      const v128_t vk0x4567 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
+      i1 += 8;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      const v128_t vk1x4567 = wasm_v128_load(w + 20);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      const v128_t vk2x4567 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
+      i3 += 8;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      const v128_t vk3x4567 = wasm_v128_load(w + 36);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      const v128_t vk4x4567 = wasm_v128_load(w + 44);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi4x4567, vk4x4567));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
+      i5 += 8;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      const v128_t vk5x4567 = wasm_v128_load(w + 52);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi5x4567, vk5x4567));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      const v128_t vk6x4567 = wasm_v128_load(w + 60);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi6x4567, vk6x4567));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
+      i7 += 8;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      const v128_t vk7x4567 = wasm_v128_load(w + 68);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi7x4567, vk7x4567));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      const v128_t vk8x4567 = wasm_v128_load(w + 76);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi8x4567, vk8x4567));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      const v128_t vi9x4567 = wasm_v128_load(i9 + 4);
+      i9 += 8;
+
+      const v128_t vk9x0123 = wasm_v128_load(w + 80);
+      const v128_t vk9x4567 = wasm_v128_load(w + 84);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi9x4567, vk9x4567));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      const v128_t vi10x4567 = wasm_v128_load(i10 + 4);
+      i10 += 8;
+
+      const v128_t vk10x0123 = wasm_v128_load(w + 88);
+      const v128_t vk10x4567 = wasm_v128_load(w + 92);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi10x4567, vk10x4567));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      const v128_t vi11x4567 = wasm_v128_load(i11 + 4);
+      i11 += 8;
+
+      const v128_t vk11x0123 = wasm_v128_load(w + 96);
+      const v128_t vk11x4567 = wasm_v128_load(w + 100);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi11x4567, vk11x4567));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      const v128_t vi12x4567 = wasm_v128_load(i12 + 4);
+      i12 += 8;
+
+      const v128_t vk12x0123 = wasm_v128_load(w + 104);
+      const v128_t vk12x4567 = wasm_v128_load(w + 108);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi12x4567, vk12x4567));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      const v128_t vi13x4567 = wasm_v128_load(i13 + 4);
+      i13 += 8;
+
+      const v128_t vk13x0123 = wasm_v128_load(w + 112);
+      const v128_t vk13x4567 = wasm_v128_load(w + 116);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi13x4567, vk13x4567));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      const v128_t vi14x4567 = wasm_v128_load(i14 + 4);
+      i14 += 8;
+
+      const v128_t vk14x0123 = wasm_v128_load(w + 120);
+      const v128_t vk14x4567 = wasm_v128_load(w + 124);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi14x4567, vk14x4567));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      const v128_t vi15x4567 = wasm_v128_load(i15 + 4);
+      i15 += 8;
+
+      const v128_t vk15x0123 = wasm_v128_load(w + 128);
+      const v128_t vk15x4567 = wasm_v128_load(w + 132);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi15x4567, vk15x4567));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      const v128_t vi16x4567 = wasm_v128_load(i16 + 4);
+      i16 += 8;
+
+      const v128_t vk16x0123 = wasm_v128_load(w + 136);
+      const v128_t vk16x4567 = wasm_v128_load(w + 140);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi16x4567, vk16x4567));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      const v128_t vi17x4567 = wasm_v128_load(i17 + 4);
+      i17 += 8;
+
+      const v128_t vk17x0123 = wasm_v128_load(w + 144);
+      const v128_t vk17x4567 = wasm_v128_load(w + 148);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi17x4567, vk17x4567));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      const v128_t vi18x4567 = wasm_v128_load(i18 + 4);
+      i18 += 8;
+
+      const v128_t vk18x0123 = wasm_v128_load(w + 152);
+      const v128_t vk18x4567 = wasm_v128_load(w + 156);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi18x4567, vk18x4567));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      const v128_t vi19x4567 = wasm_v128_load(i19 + 4);
+      i19 += 8;
+
+      const v128_t vk19x0123 = wasm_v128_load(w + 160);
+      const v128_t vk19x4567 = wasm_v128_load(w + 164);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi19x4567, vk19x4567));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      const v128_t vi20x4567 = wasm_v128_load(i20 + 4);
+      i20 += 8;
+
+      const v128_t vk20x0123 = wasm_v128_load(w + 168);
+      const v128_t vk20x4567 = wasm_v128_load(w + 172);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi20x4567, vk20x4567));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      const v128_t vi21x4567 = wasm_v128_load(i21 + 4);
+      i21 += 8;
+
+      const v128_t vk21x0123 = wasm_v128_load(w + 176);
+      const v128_t vk21x4567 = wasm_v128_load(w + 180);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi21x4567, vk21x4567));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      const v128_t vi22x4567 = wasm_v128_load(i22 + 4);
+      i22 += 8;
+
+      const v128_t vk22x0123 = wasm_v128_load(w + 184);
+      const v128_t vk22x4567 = wasm_v128_load(w + 188);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi22x4567, vk22x4567));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      const v128_t vi23x4567 = wasm_v128_load(i23 + 4);
+      i23 += 8;
+
+      const v128_t vk23x0123 = wasm_v128_load(w + 192);
+      const v128_t vk23x4567 = wasm_v128_load(w + 196);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi23x4567, vk23x4567));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      const v128_t vi24x4567 = wasm_v128_load(i24 + 4);
+      i24 += 8;
+
+      const v128_t vk24x0123 = wasm_v128_load(w + 200);
+      const v128_t vk24x4567 = wasm_v128_load(w + 204);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi24x4567, vk24x4567));
+
+      w += 208;
+
+      // Add up both accumulator chains into vacc0123p0 and vacc4567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
+
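+      // x86-tuned clamping: compare + bitselect instead of f32x4.min/max
+      // (compare the *_arm_* variant of this kernel), presumably because
+      // min/max lowers less efficiently on x86.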
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      v128_t vacc4567 = wasm_v128_bitselect(vmin, vacc4567p0, wasm_f32x4_lt(vacc4567p0, vmin));
+
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+      vacc4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      wasm_v128_store(output + 4, vacc4567);
+      output += 8;
+    }
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      i4 += 4;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      i5 += 4;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      i6 += 4;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      i7 += 4;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      i8 += 4;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      i9 += 4;
+
+      const v128_t vk9x0123 = wasm_v128_load(w + 80);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      i10 += 4;
+
+      const v128_t vk10x0123 = wasm_v128_load(w + 88);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      i11 += 4;
+
+      const v128_t vk11x0123 = wasm_v128_load(w + 96);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      i12 += 4;
+
+      const v128_t vk12x0123 = wasm_v128_load(w + 104);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      i13 += 4;
+
+      const v128_t vk13x0123 = wasm_v128_load(w + 112);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      i14 += 4;
+
+      const v128_t vk14x0123 = wasm_v128_load(w + 120);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      i15 += 4;
+
+      const v128_t vk15x0123 = wasm_v128_load(w + 128);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      i16 += 4;
+
+      const v128_t vk16x0123 = wasm_v128_load(w + 136);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      i17 += 4;
+
+      const v128_t vk17x0123 = wasm_v128_load(w + 144);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      i18 += 4;
+
+      const v128_t vk18x0123 = wasm_v128_load(w + 152);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      i19 += 4;
+
+      const v128_t vk19x0123 = wasm_v128_load(w + 160);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      i20 += 4;
+
+      const v128_t vk20x0123 = wasm_v128_load(w + 168);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      i21 += 4;
+
+      const v128_t vk21x0123 = wasm_v128_load(w + 176);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      i22 += 4;
+
+      const v128_t vk22x0123 = wasm_v128_load(w + 184);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      i23 += 4;
+
+      const v128_t vk23x0123 = wasm_v128_load(w + 192);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      i24 += 4;
+
+      const v128_t vk24x0123 = wasm_v128_load(w + 200);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+
+      w += 4;
+
+      // Add up both accumulator chains into vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
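+      // Remainder of 1-3 channels: compute a full 4-lane result and store only
+      // the low lanes via the partial stores at the end of this block.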
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      const v128_t vi9x0123 = wasm_v128_load(i9);
+      const v128_t vk9x0123 = wasm_v128_load(w + 80);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi9x0123, vk9x0123));
+
+      const v128_t vi10x0123 = wasm_v128_load(i10);
+      const v128_t vk10x0123 = wasm_v128_load(w + 88);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi10x0123, vk10x0123));
+
+      const v128_t vi11x0123 = wasm_v128_load(i11);
+      const v128_t vk11x0123 = wasm_v128_load(w + 96);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi11x0123, vk11x0123));
+
+      const v128_t vi12x0123 = wasm_v128_load(i12);
+      const v128_t vk12x0123 = wasm_v128_load(w + 104);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi12x0123, vk12x0123));
+
+      const v128_t vi13x0123 = wasm_v128_load(i13);
+      const v128_t vk13x0123 = wasm_v128_load(w + 112);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi13x0123, vk13x0123));
+
+      const v128_t vi14x0123 = wasm_v128_load(i14);
+      const v128_t vk14x0123 = wasm_v128_load(w + 120);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi14x0123, vk14x0123));
+
+      const v128_t vi15x0123 = wasm_v128_load(i15);
+      const v128_t vk15x0123 = wasm_v128_load(w + 128);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi15x0123, vk15x0123));
+
+      const v128_t vi16x0123 = wasm_v128_load(i16);
+      const v128_t vk16x0123 = wasm_v128_load(w + 136);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi16x0123, vk16x0123));
+
+      const v128_t vi17x0123 = wasm_v128_load(i17);
+      const v128_t vk17x0123 = wasm_v128_load(w + 144);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi17x0123, vk17x0123));
+
+      const v128_t vi18x0123 = wasm_v128_load(i18);
+      const v128_t vk18x0123 = wasm_v128_load(w + 152);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi18x0123, vk18x0123));
+
+      const v128_t vi19x0123 = wasm_v128_load(i19);
+      const v128_t vk19x0123 = wasm_v128_load(w + 160);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi19x0123, vk19x0123));
+
+      const v128_t vi20x0123 = wasm_v128_load(i20);
+      const v128_t vk20x0123 = wasm_v128_load(w + 168);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi20x0123, vk20x0123));
+
+      const v128_t vi21x0123 = wasm_v128_load(i21);
+      const v128_t vk21x0123 = wasm_v128_load(w + 176);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi21x0123, vk21x0123));
+
+      const v128_t vi22x0123 = wasm_v128_load(i22);
+      const v128_t vk22x0123 = wasm_v128_load(w + 184);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi22x0123, vk22x0123));
+
+      const v128_t vi23x0123 = wasm_v128_load(i23);
+      const v128_t vk23x0123 = wasm_v128_load(w + 192);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi23x0123, vk23x0123));
+
+      const v128_t vi24x0123 = wasm_v128_load(i24);
+      const v128_t vk24x0123 = wasm_v128_load(w + 200);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi24x0123, vk24x0123));
+
+      // Add up both accumulator chains into vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-arm.c b/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-arm.c
deleted file mode 100644
index 020cd8b..0000000
--- a/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-arm.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 8; c -= 8) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-      v128_t vacc4567p0 = wasm_v128_load(w + 4);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
-      i0 += 8;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      const v128_t vk0x4567 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
-      i1 += 8;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      const v128_t vk1x4567 = wasm_v128_load(w + 20);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      const v128_t vk2x4567 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
-      i3 += 8;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      const v128_t vk3x4567 = wasm_v128_load(w + 36);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
-
-      w += 40;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
-
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-      vacc4567 = wasm_f32x4_min(vacc4567, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      wasm_v128_store(output + 4, vacc4567);
-      output += 8;
-    }
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      w += 4;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-x86.c b/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-x86.c
deleted file mode 100644
index 5e1ca91..0000000
--- a/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-acc2-x86.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 8; c -= 8) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-      v128_t vacc4567p0 = wasm_v128_load(w + 4);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
-      i0 += 8;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      const v128_t vk0x4567 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
-      i1 += 8;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      const v128_t vk1x4567 = wasm_v128_load(w + 20);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      const v128_t vk2x4567 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
-      i3 += 8;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      const v128_t vk3x4567 = wasm_v128_load(w + 36);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
-
-      w += 40;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      v128_t vacc4567 = wasm_v128_bitselect(vmin, vacc4567p0, wasm_f32x4_lt(vacc4567p0, vmin));
-
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-      vacc4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      wasm_v128_store(output + 4, vacc4567);
-      output += 8;
-    }
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      w += 4;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-arm-acc2.c b/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-arm-acc2.c
new file mode 100644
index 0000000..f6b47ff
--- /dev/null
+++ b/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-arm-acc2.c
@@ -0,0 +1,192 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 8; c -= 8) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+      v128_t vacc4567p0 = wasm_v128_load(w + 4);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
+      i0 += 8;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      const v128_t vk0x4567 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
+      i1 += 8;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      const v128_t vk1x4567 = wasm_v128_load(w + 20);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      const v128_t vk2x4567 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
+      i3 += 8;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      const v128_t vk3x4567 = wasm_v128_load(w + 36);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
+
+      w += 40;
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
+
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+      vacc4567 = wasm_f32x4_min(vacc4567, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      wasm_v128_store(output + 4, vacc4567);
+      output += 8;
+    }
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      w += 4;
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-x86-acc2.c b/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-x86-acc2.c
new file mode 100644
index 0000000..c33da4c
--- /dev/null
+++ b/src/f32-dwconv/gen/up8x4-minmax-wasmsimd-x86-acc2.c
@@ -0,0 +1,192 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 8; c -= 8) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+      v128_t vacc4567p0 = wasm_v128_load(w + 4);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
+      i0 += 8;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      const v128_t vk0x4567 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
+      i1 += 8;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      const v128_t vk1x4567 = wasm_v128_load(w + 20);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      const v128_t vk2x4567 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
+      i3 += 8;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      const v128_t vk3x4567 = wasm_v128_load(w + 36);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
+
+      w += 40;
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      v128_t vacc4567 = wasm_v128_bitselect(vmin, vacc4567p0, wasm_f32x4_lt(vacc4567p0, vmin));
+
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+      vacc4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      wasm_v128_store(output + 4, vacc4567);
+      output += 8;
+    }
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      w += 4;
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-arm.c b/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-arm.c
deleted file mode 100644
index bcf763b..0000000
--- a/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-arm.c
+++ /dev/null
@@ -1,312 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    const float* i4 = input[4];
-    assert(i4 != NULL);
-    if XNN_UNPREDICTABLE(i4 != zero) {
-      i4 = (const float*) ((uintptr_t) i4 + input_offset);
-    }
-    const float* i5 = input[5];
-    assert(i5 != NULL);
-    if XNN_UNPREDICTABLE(i5 != zero) {
-      i5 = (const float*) ((uintptr_t) i5 + input_offset);
-    }
-    const float* i6 = input[6];
-    assert(i6 != NULL);
-    if XNN_UNPREDICTABLE(i6 != zero) {
-      i6 = (const float*) ((uintptr_t) i6 + input_offset);
-    }
-    const float* i7 = input[7];
-    assert(i7 != NULL);
-    if XNN_UNPREDICTABLE(i7 != zero) {
-      i7 = (const float*) ((uintptr_t) i7 + input_offset);
-    }
-    const float* i8 = input[8];
-    assert(i8 != NULL);
-    if XNN_UNPREDICTABLE(i8 != zero) {
-      i8 = (const float*) ((uintptr_t) i8 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 8; c -= 8) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-      v128_t vacc4567p0 = wasm_v128_load(w + 4);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
-      i0 += 8;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      const v128_t vk0x4567 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
-      i1 += 8;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      const v128_t vk1x4567 = wasm_v128_load(w + 20);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      const v128_t vk2x4567 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
-      i3 += 8;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      const v128_t vk3x4567 = wasm_v128_load(w + 36);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
-      i4 += 8;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      const v128_t vk4x4567 = wasm_v128_load(w + 44);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi4x4567, vk4x4567));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
-      i5 += 8;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      const v128_t vk5x4567 = wasm_v128_load(w + 52);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi5x4567, vk5x4567));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
-      i6 += 8;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      const v128_t vk6x4567 = wasm_v128_load(w + 60);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi6x4567, vk6x4567));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
-      i7 += 8;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      const v128_t vk7x4567 = wasm_v128_load(w + 68);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi7x4567, vk7x4567));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
-      i8 += 8;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      const v128_t vk8x4567 = wasm_v128_load(w + 76);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi8x4567, vk8x4567));
-
-      w += 80;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
-
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-      vacc4567 = wasm_f32x4_min(vacc4567, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      wasm_v128_store(output + 4, vacc4567);
-      output += 8;
-    }
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      i4 += 4;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      i5 += 4;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      i6 += 4;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      i7 += 4;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      i8 += 4;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      w += 4;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
-      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-x86.c b/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-x86.c
deleted file mode 100644
index 41c1c4b..0000000
--- a/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-acc2-x86.c
+++ /dev/null
@@ -1,312 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv/up-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/dwconv.h>
-
-
-void xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86(
-    size_t channels,
-    size_t output_width,
-    const float** input,
-    const float* weights,
-    float* output,
-    size_t input_stride,
-    size_t output_increment,
-    size_t input_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
-{
-  assert(channels != 0);
-  assert(output_width != 0);
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    const float* i0 = input[0];
-    assert(i0 != NULL);
-    if XNN_UNPREDICTABLE(i0 != zero) {
-      i0 = (const float*) ((uintptr_t) i0 + input_offset);
-    }
-    const float* i1 = input[1];
-    assert(i1 != NULL);
-    if XNN_UNPREDICTABLE(i1 != zero) {
-      i1 = (const float*) ((uintptr_t) i1 + input_offset);
-    }
-    const float* i2 = input[2];
-    assert(i2 != NULL);
-    if XNN_UNPREDICTABLE(i2 != zero) {
-      i2 = (const float*) ((uintptr_t) i2 + input_offset);
-    }
-    const float* i3 = input[3];
-    assert(i3 != NULL);
-    if XNN_UNPREDICTABLE(i3 != zero) {
-      i3 = (const float*) ((uintptr_t) i3 + input_offset);
-    }
-    const float* i4 = input[4];
-    assert(i4 != NULL);
-    if XNN_UNPREDICTABLE(i4 != zero) {
-      i4 = (const float*) ((uintptr_t) i4 + input_offset);
-    }
-    const float* i5 = input[5];
-    assert(i5 != NULL);
-    if XNN_UNPREDICTABLE(i5 != zero) {
-      i5 = (const float*) ((uintptr_t) i5 + input_offset);
-    }
-    const float* i6 = input[6];
-    assert(i6 != NULL);
-    if XNN_UNPREDICTABLE(i6 != zero) {
-      i6 = (const float*) ((uintptr_t) i6 + input_offset);
-    }
-    const float* i7 = input[7];
-    assert(i7 != NULL);
-    if XNN_UNPREDICTABLE(i7 != zero) {
-      i7 = (const float*) ((uintptr_t) i7 + input_offset);
-    }
-    const float* i8 = input[8];
-    assert(i8 != NULL);
-    if XNN_UNPREDICTABLE(i8 != zero) {
-      i8 = (const float*) ((uintptr_t) i8 + input_offset);
-    }
-    input = (const float**) ((uintptr_t) input + input_stride);
-
-    size_t c = channels;
-    const float* w = weights;
-    for (; c >= 8; c -= 8) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-      v128_t vacc4567p0 = wasm_v128_load(w + 4);
-
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
-      i0 += 8;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      const v128_t vk0x4567 = wasm_v128_load(w + 12);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
-      i1 += 8;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      const v128_t vk1x4567 = wasm_v128_load(w + 20);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      const v128_t vk2x4567 = wasm_v128_load(w + 28);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
-      i3 += 8;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      const v128_t vk3x4567 = wasm_v128_load(w + 36);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
-      i4 += 8;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      const v128_t vk4x4567 = wasm_v128_load(w + 44);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi4x4567, vk4x4567));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
-      i5 += 8;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      const v128_t vk5x4567 = wasm_v128_load(w + 52);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi5x4567, vk5x4567));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
-      i6 += 8;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      const v128_t vk6x4567 = wasm_v128_load(w + 60);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi6x4567, vk6x4567));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
-      i7 += 8;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      const v128_t vk7x4567 = wasm_v128_load(w + 68);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi7x4567, vk7x4567));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
-      i8 += 8;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      const v128_t vk8x4567 = wasm_v128_load(w + 76);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi8x4567, vk8x4567));
-
-      w += 80;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      v128_t vacc4567 = wasm_v128_bitselect(vmin, vacc4567p0, wasm_f32x4_lt(vacc4567p0, vmin));
-
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-      vacc4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      wasm_v128_store(output + 4, vacc4567);
-      output += 8;
-    }
-    for (; c >= 4; c -= 4) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      i0 += 4;
-
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      i1 += 4;
-
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      i2 += 4;
-
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      i3 += 4;
-
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      i4 += 4;
-
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      i5 += 4;
-
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      i6 += 4;
-
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      i7 += 4;
-
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      i8 += 4;
-
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      w += 4;
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      wasm_v128_store(output, vacc0123);
-      output += 4;
-    }
-    if XNN_UNLIKELY(c != 0) {
-      v128_t vacc0123p0 = wasm_v128_load(w);
-
-      const v128_t vi0x0123 = wasm_v128_load(i0);
-      const v128_t vk0x0123 = wasm_v128_load(w + 8);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
-
-      const v128_t vi1x0123 = wasm_v128_load(i1);
-      const v128_t vk1x0123 = wasm_v128_load(w + 16);
-      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
-
-      const v128_t vi2x0123 = wasm_v128_load(i2);
-      const v128_t vk2x0123 = wasm_v128_load(w + 24);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
-
-      const v128_t vi3x0123 = wasm_v128_load(i3);
-      const v128_t vk3x0123 = wasm_v128_load(w + 32);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
-
-      const v128_t vi4x0123 = wasm_v128_load(i4);
-      const v128_t vk4x0123 = wasm_v128_load(w + 40);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
-
-      const v128_t vi5x0123 = wasm_v128_load(i5);
-      const v128_t vk5x0123 = wasm_v128_load(w + 48);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
-
-      const v128_t vi6x0123 = wasm_v128_load(i6);
-      const v128_t vk6x0123 = wasm_v128_load(w + 56);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
-
-      const v128_t vi7x0123 = wasm_v128_load(i7);
-      const v128_t vk7x0123 = wasm_v128_load(w + 64);
-      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
-
-      const v128_t vi8x0123 = wasm_v128_load(i8);
-      const v128_t vk8x0123 = wasm_v128_load(w + 72);
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
-
-      // Add up all accumulators to vacc01234567p0
-      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
-
-      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
-      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
-
-      if (c & 2) {
-        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
-        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
-        output += 2;
-      }
-      if (c & 1) {
-        *output = wasm_f32x4_extract_lane(vacc0123, 0);
-        output += 1;
-      }
-    }
-
-    output = (float*) ((uintptr_t) output + output_increment);
-  } while (--output_width != 0);
-}
diff --git a/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-arm-acc2.c b/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-arm-acc2.c
new file mode 100644
index 0000000..db27aad
--- /dev/null
+++ b/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-arm-acc2.c
@@ -0,0 +1,312 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    const float* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const float*) ((uintptr_t) i4 + input_offset);
+    }
+    const float* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const float*) ((uintptr_t) i5 + input_offset);
+    }
+    const float* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const float*) ((uintptr_t) i6 + input_offset);
+    }
+    const float* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const float*) ((uintptr_t) i7 + input_offset);
+    }
+    const float* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const float*) ((uintptr_t) i8 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 8; c -= 8) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+      v128_t vacc4567p0 = wasm_v128_load(w + 4);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
+      i0 += 8;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      const v128_t vk0x4567 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
+      i1 += 8;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      const v128_t vk1x4567 = wasm_v128_load(w + 20);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      const v128_t vk2x4567 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
+      i3 += 8;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      const v128_t vk3x4567 = wasm_v128_load(w + 36);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      const v128_t vk4x4567 = wasm_v128_load(w + 44);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi4x4567, vk4x4567));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
+      i5 += 8;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      const v128_t vk5x4567 = wasm_v128_load(w + 52);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi5x4567, vk5x4567));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      const v128_t vk6x4567 = wasm_v128_load(w + 60);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi6x4567, vk6x4567));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
+      i7 += 8;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      const v128_t vk7x4567 = wasm_v128_load(w + 68);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi7x4567, vk7x4567));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      const v128_t vk8x4567 = wasm_v128_load(w + 76);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi8x4567, vk8x4567));
+
+      w += 80;
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
+
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+      vacc4567 = wasm_f32x4_min(vacc4567, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      wasm_v128_store(output + 4, vacc4567);
+      output += 8;
+    }
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      i4 += 4;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      i5 += 4;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      i6 += 4;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      i7 += 4;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      i8 += 4;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      w += 4;
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
+      vacc0123 = wasm_f32x4_min(vacc0123, vmax);
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-x86-acc2.c b/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-x86-acc2.c
new file mode 100644
index 0000000..f2dc185
--- /dev/null
+++ b/src/f32-dwconv/gen/up8x9-minmax-wasmsimd-x86-acc2.c
@@ -0,0 +1,312 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv/up-wasmsimd.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2(
+    size_t channels,
+    size_t output_width,
+    const float** input,
+    const float* weights,
+    float* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    const float* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const float*) ((uintptr_t) i0 + input_offset);
+    }
+    const float* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const float*) ((uintptr_t) i1 + input_offset);
+    }
+    const float* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const float*) ((uintptr_t) i2 + input_offset);
+    }
+    const float* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const float*) ((uintptr_t) i3 + input_offset);
+    }
+    const float* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const float*) ((uintptr_t) i4 + input_offset);
+    }
+    const float* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const float*) ((uintptr_t) i5 + input_offset);
+    }
+    const float* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const float*) ((uintptr_t) i6 + input_offset);
+    }
+    const float* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const float*) ((uintptr_t) i7 + input_offset);
+    }
+    const float* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const float*) ((uintptr_t) i8 + input_offset);
+    }
+    input = (const float**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const float* w = weights;
+    for (; c >= 8; c -= 8) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+      v128_t vacc4567p0 = wasm_v128_load(w + 4);
+
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
+      i0 += 8;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      const v128_t vk0x4567 = wasm_v128_load(w + 12);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi0x4567, vk0x4567));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
+      i1 += 8;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      const v128_t vk1x4567 = wasm_v128_load(w + 20);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+      v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      const v128_t vk2x4567 = wasm_v128_load(w + 28);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi2x4567, vk2x4567));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
+      i3 += 8;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      const v128_t vk3x4567 = wasm_v128_load(w + 36);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi3x4567, vk3x4567));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      const v128_t vk4x4567 = wasm_v128_load(w + 44);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi4x4567, vk4x4567));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
+      i5 += 8;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      const v128_t vk5x4567 = wasm_v128_load(w + 52);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi5x4567, vk5x4567));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      const v128_t vk6x4567 = wasm_v128_load(w + 60);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi6x4567, vk6x4567));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
+      i7 += 8;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      const v128_t vk7x4567 = wasm_v128_load(w + 68);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+      vacc4567p1 = wasm_f32x4_add(vacc4567p1, wasm_f32x4_mul(vi7x4567, vk7x4567));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      const v128_t vk8x4567 = wasm_v128_load(w + 76);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, wasm_f32x4_mul(vi8x4567, vk8x4567));
+
+      w += 80;
+
+      // Add up all accumulators to vacc01234567p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+      vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      v128_t vacc4567 = wasm_v128_bitselect(vmin, vacc4567p0, wasm_f32x4_lt(vacc4567p0, vmin));
+
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+      vacc4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      wasm_v128_store(output + 4, vacc4567);
+      output += 8;
+    }
+    for (; c >= 4; c -= 4) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      i0 += 4;
+
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      i1 += 4;
+
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      i2 += 4;
+
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      i3 += 4;
+
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      i4 += 4;
+
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      i5 += 4;
+
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      i6 += 4;
+
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      i7 += 4;
+
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      i8 += 4;
+
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      w += 4;
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      wasm_v128_store(output, vacc0123);
+      output += 4;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      v128_t vacc0123p0 = wasm_v128_load(w);
+
+      const v128_t vi0x0123 = wasm_v128_load(i0);
+      const v128_t vk0x0123 = wasm_v128_load(w + 8);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi0x0123, vk0x0123));
+
+      const v128_t vi1x0123 = wasm_v128_load(i1);
+      const v128_t vk1x0123 = wasm_v128_load(w + 16);
+      v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
+
+      const v128_t vi2x0123 = wasm_v128_load(i2);
+      const v128_t vk2x0123 = wasm_v128_load(w + 24);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi2x0123, vk2x0123));
+
+      const v128_t vi3x0123 = wasm_v128_load(i3);
+      const v128_t vk3x0123 = wasm_v128_load(w + 32);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi3x0123, vk3x0123));
+
+      const v128_t vi4x0123 = wasm_v128_load(i4);
+      const v128_t vk4x0123 = wasm_v128_load(w + 40);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi4x0123, vk4x0123));
+
+      const v128_t vi5x0123 = wasm_v128_load(i5);
+      const v128_t vk5x0123 = wasm_v128_load(w + 48);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi5x0123, vk5x0123));
+
+      const v128_t vi6x0123 = wasm_v128_load(i6);
+      const v128_t vk6x0123 = wasm_v128_load(w + 56);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi6x0123, vk6x0123));
+
+      const v128_t vi7x0123 = wasm_v128_load(i7);
+      const v128_t vk7x0123 = wasm_v128_load(w + 64);
+      vacc0123p1 = wasm_f32x4_add(vacc0123p1, wasm_f32x4_mul(vi7x0123, vk7x0123));
+
+      const v128_t vi8x0123 = wasm_v128_load(i8);
+      const v128_t vk8x0123 = wasm_v128_load(w + 72);
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi8x0123, vk8x0123));
+
+      // Add up all accumulators to vacc0123p0
+      vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
+
+      v128_t vacc0123 = wasm_v128_bitselect(vmin, vacc0123p0, wasm_f32x4_lt(vacc0123p0, vmin));
+      vacc0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
+
+      if (c & 2) {
+        *((double*) output) = wasm_f64x2_extract_lane(vacc0123, 0);
+        vacc0123 = wasm_v32x4_shuffle(vacc0123, vacc0123, 2, 3, 2, 3);
+        output += 2;
+      }
+      if (c & 1) {
+        *output = wasm_f32x4_extract_lane(vacc0123, 0);
+        output += 1;
+      }
+    }
+
+    output = (float*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
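Note on the clamping idiom in the generated wasmsimd dwconv kernel above: the x86-flavored variants emulate the output min/max clamp with comparisons plus wasm_v128_bitselect, while the arm-flavored variants (see the $if X86 / $else branches in the templates below) use wasm_f32x4_max / wasm_f32x4_min directly. A minimal sketch of the two equivalent forms, assuming a wasm32 target built with -msimd128; the helper names are illustrative and not part of this change:

#include <wasm_simd128.h>

// x86-flavored clamp: lanes where vacc < vmin take vmin, then lanes where the
// result exceeds vmax take vmax.
static inline v128_t clamp_bitselect(v128_t vacc, v128_t vmin, v128_t vmax) {
  vacc = wasm_v128_bitselect(vmin, vacc, wasm_f32x4_lt(vacc, vmin));
  return wasm_v128_bitselect(vacc, vmax, wasm_f32x4_le(vacc, vmax));
}

// arm-flavored clamp: plain lane-wise max/min against the same bounds.
static inline v128_t clamp_minmax(v128_t vacc, v128_t vmin, v128_t vmax) {
  return wasm_f32x4_min(wasm_f32x4_max(vacc, vmin), vmax);
}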
diff --git a/src/f32-dwconv/up-wasmsimd.c.in b/src/f32-dwconv/up-wasmsimd.c.in
index fa1c663..d863b20 100644
--- a/src/f32-dwconv/up-wasmsimd.c.in
+++ b/src/f32-dwconv/up-wasmsimd.c.in
@@ -18,7 +18,7 @@
 $ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
 $ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
-void xnn_f32_dwconv${ACTIVATION_SUFFIX}_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__wasmsimd${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}${ARCH_SUFFIX}(
+void xnn_f32_dwconv${ACTIVATION_SUFFIX}_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__wasmsimd${ARCH_SUFFIX}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
     size_t channels,
     size_t output_width,
     const float** input,
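The only change to this template is the ordering of the name suffixes: the architecture suffix now precedes the accumulator-count suffix. For example (tile sizes chosen for illustration only), with ACTIVATION=MINMAX, CHANNEL_TILE=8, KERNEL_TILE=9, ACCUMULATORS=2 and X86 set, the emitted symbol changes from

  xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86

to

  xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2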
diff --git a/src/f32-dwconv2d-chw/3x3p1-wasmsimd-1x4-acc3.c.in b/src/f32-dwconv2d-chw/3x3p1-wasmsimd-1x4-acc3.c.in
deleted file mode 100644
index e1e115d..0000000
--- a/src/f32-dwconv2d-chw/3x3p1-wasmsimd-1x4-acc3.c.in
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-$ARCH_SUFFIX = "_x86" if X86 else "_arm"
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd${ARCH_SUFFIX}_1x4_acc3(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height == 1) {
-      i2 = zero;
-    }
-
-    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
-    v128_t vi0x3012 = vzero;
-    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
-    v128_t vi1x3012 = vzero;
-    // vi2x3012 = ( vi22, vi21, vi20, vi13 )
-    v128_t vi2x3012 = vzero;
-    // vi0x4567 = ( vi07, vi06, vi05, vi04 )
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    // vi1x4567 = ( vi17, vi16, vi15, vi14 )
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    // vi2x4567 = ( vi27, vi26, vi25, vi24 )
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      v128_t vo4567p0 = vbias;
-
-      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      // vi1x89AB = ( vi1B, vi0A, vi09, vi08 )
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      // vi2x89AB = ( vi2B, vi0A, vi09, vi08 )
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
-      const v128_t vi0x7456 = wasm_v32x4_shuffle(vi0x4567, vi0x4567, 3, 0, 1, 2);
-      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
-      const v128_t vi1x7456 = wasm_v32x4_shuffle(vi1x4567, vi1x4567, 3, 0, 1, 2);
-      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
-      const v128_t vi2x7456 = wasm_v32x4_shuffle(vi2x4567, vi2x4567, 3, 0, 1, 2);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo4567p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo4567p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x7456, vi0x3012, 4, 1, 2, 3);
-      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x7456, vi1x3012, 4, 1, 2, 3);
-      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x7456, vi2x3012, 4, 1, 2, 3);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x3012 = vi0x7456;
-      vi1x3012 = vi1x7456;
-      vi2x3012 = vi2x7456;
-
-      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
-      const v128_t vi0x8567 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 4, 1, 2, 3);
-      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
-      const v128_t vi1x8567 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 4, 1, 2, 3);
-      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
-      const v128_t vi2x8567 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 4, 1, 2, 3);
-
-      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x8567, vi0x8567, 1, 2, 3, 0);
-      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x8567, vi1x8567, 1, 2, 3, 0);
-      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x8567, vi2x8567, 1, 2, 3, 0);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-      v128_t vo = wasm_f32x4_add(vo4567p0, vo4567p1);
-      vo = wasm_f32x4_add(vo, vo4567p2);
-
-      $if X86:
-        vo = wasm_v128_bitselect(vmin, vo, wasm_f32x4_lt(vo, vmin));
-        vo = wasm_v128_bitselect(vo, vmax, wasm_f32x4_le(vo, vmax));
-      $else:
-        vo = wasm_f32x4_max(vo, vmin);
-        vo = wasm_f32x4_min(vo, vmax);
-
-      wasm_v128_store(output, vo);
-      output += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      v128_t vo4567p0 = vbias;
-
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
-      const v128_t vi0x7456 = wasm_v32x4_shuffle(vi0x4567, vi0x4567, 3, 0, 1, 2);
-      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
-      const v128_t vi1x7456 = wasm_v32x4_shuffle(vi1x4567, vi1x4567, 3, 0, 1, 2);
-      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
-      const v128_t vi2x7456 = wasm_v32x4_shuffle(vi2x4567, vi2x4567, 3, 0, 1, 2);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo4567p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo4567p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x7456, vi0x3012, 4, 1, 2, 3);
-      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x7456, vi1x3012, 4, 1, 2, 3);
-      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x7456, vi2x3012, 4, 1, 2, 3);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk20));
-
-      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
-      const v128_t vi0x8567 = wasm_v32x4_shuffle(vi0x4567, vzero, 4, 1, 2, 3);
-      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
-      const v128_t vi1x8567 = wasm_v32x4_shuffle(vi1x4567, vzero, 4, 1, 2, 3);
-      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
-      const v128_t vi2x8567 = wasm_v32x4_shuffle(vi2x4567, vzero, 4, 1, 2, 3);
-
-      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x8567, vi0x8567, 1, 2, 3, 0);
-      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x8567, vi1x8567, 1, 2, 3, 0);
-      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x8567, vi2x8567, 1, 2, 3, 0);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk22));
-
-      v128_t vo = wasm_f32x4_add(vo4567p0, vo4567p1);
-      vo = wasm_f32x4_add(vo, vo4567p2);
-
-      $if X86:
-        vo = wasm_v128_bitselect(vmin, vo, wasm_f32x4_lt(vo, vmin));
-        vo = wasm_v128_bitselect(vo, vmax, wasm_f32x4_le(vo, vmax));
-      $else:
-        vo = wasm_f32x4_max(vo, vmin);
-        vo = wasm_f32x4_min(vo, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(output, vo);
-        output += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo, 0);
-          output += 2;
-          vo = wasm_v32x4_shuffle(vo, vo, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *output = wasm_f32x4_extract_lane(vo, 0);
-          output += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-  } while (--output_height != 0);
-}
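The deleted file above was a single hand-specialized configuration (1 output row, 4-wide, 3 accumulators). The template added below parameterizes the same 3x3, padding-1 CHW kernel over ROW_TILE and ACCUMULATORS, and switches the weights to a "loadsplat" scheme: instead of one splat-load per tap, it loads the 10 filter values in three vector loads and broadcasts each tap by shuffle. A minimal sketch of the difference for a single tap, using only intrinsics that appear in these files:

// Old 1x4-acc3 template: one splat-load per tap, e.g. tap k01 = weights[2]:
//   const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
// New loadsplat template: three vector loads cover all 10 taps, then per-tap broadcasts:
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89   = wasm_v64x2_load_splat(weights + 8);
const v128_t vk01   = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);  // weights[2]
const v128_t vk22   = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);      // weights[9]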
diff --git a/src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in b/src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
new file mode 100644
index 0000000..0dbeb27
--- /dev/null
+++ b/src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
@@ -0,0 +1,230 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+$ARCH_SUFFIX = "_x86" if X86 else "_arm"
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd${ARCH_SUFFIX}_loadsplat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  $for M in range(2, 2 + ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+  size_t output_height = input_height;
+  do {
+    $for M in range(2, 2 + ROW_TILE):
+      if XNN_UNPREDICTABLE(output_height < ${M}) {
+        i${M} = zero;
+        $if M <= ROW_TILE:
+          o${M-1} = o${M-2};
+      }
+
+    $for M in range(2 + ROW_TILE):
+      v128_t vi${M}x0123 = vzero;
+
+    $for M in range(2 + ROW_TILE):
+      v128_t vi${M}x4567 = wasm_v128_load(i${M});
+      i${M} += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x89AB = wasm_v128_load(i${M});
+        i${M} += 4;
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          $if K == 0:
+            v128_t vo${M}p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi${M+K}x4567, vk${K}1));
+          $elif K < ACCUMULATORS:
+            v128_t vo${M}p${K} = wasm_f32x4_mul(vi${M+K}x4567, vk${K}1);
+          $else:
+            vo${M}p${K % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${K % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x4567, vk${K}1));
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          $if K+3 < ACCUMULATORS:
+            v128_t vo${M}p${K+3} = wasm_f32x4_mul(vi${M+K}x3456, vk${K}0);
+          $else:
+            vo${M}p${(K+3) % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${(K+3) % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x3456, vk${K}0));
+
+      $for M in range(2 + ROW_TILE):
+        vi${M}x0123 = vi${M}x4567;
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 1, 2, 3, 4);
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          vo${M}p${(K+6) % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${(K+6) % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x5678, vk${K}2));
+
+      $for M in range(2 + ROW_TILE):
+        vi${M}x4567 = vi${M}x89AB;
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M});
+        o${M} += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      $for M in range(2 + ROW_TILE):
+        vi${M}x4567 = wasm_v128_and(vmask, vi${M}x4567);
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          $if K == 0:
+            v128_t vo${M}p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi${M+K}x4567, vk${K}1));
+          $elif K < ACCUMULATORS:
+            v128_t vo${M}p${K} = wasm_f32x4_mul(vi${M+K}x4567, vk${K}1);
+          $else:
+            vo${M}p${K % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${K % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x4567, vk${K}1));
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          $if K+3 < ACCUMULATORS:
+            v128_t vo${M}p${K+3} = wasm_f32x4_mul(vi${M+K}x3456, vk${K}0);
+          $else:
+            vo${M}p${(K+3) % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${(K+3) % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x3456, vk${K}0));
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vzero, 1, 2, 3, 4);
+
+      $for K in range(3):
+        $for M in range(ROW_TILE):
+          vo${M}p${(K+6) % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${(K+6) % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x5678, vk${K}2));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        $for M in reversed(range(ROW_TILE)):
+          wasm_v128_store(o${M}, vo${M});
+          o${M} += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0);
+            o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0);
+            o${M} += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i${ROW_TILE} - input_decrement);
+    i1 = (const float*) ((uintptr_t) i${ROW_TILE+1} - input_decrement);
+    $for M in range(2, 2 + ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+  } while (${"--" if ROW_TILE == 1 else ""}output_height != 0);
+}
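Each output row in this template keeps up to ACCUMULATORS independent partial sums (vo${M}p0, vo${M}p1, ...) so the nine multiply-adds are not serialized through one register; the $ACC_SLICE loop then folds them pairwise into vo${M}p0 before clamping. A minimal sketch of what that loop expands to for one row (ROW_TILE=1) with ACCUMULATORS=3, values chosen for illustration:

// ACC_SLICE == 1: fold odd-indexed partial sums into their even neighbours.
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
// (vo0p2 has no partner at this slice and is left as-is.)
// ACC_SLICE == 2: fold the remaining partial sum.
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);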
diff --git a/src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in b/src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
new file mode 100644
index 0000000..c9d0da4
--- /dev/null
+++ b/src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
@@ -0,0 +1,251 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+$ARCH_SUFFIX = "_x86" if X86 else "_arm"
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd${ARCH_SUFFIX}_splat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  $for M in range(2, 2 + ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+  size_t output_height = input_height;
+  do {
+    $for M in range(2, 2 + ROW_TILE):
+      if XNN_UNPREDICTABLE(output_height < ${M}) {
+        i${M} = zero;
+        $if M <= ROW_TILE:
+          o${M-1} = o${M-2};
+      }
+
+    $for M in range(2 + ROW_TILE):
+      v128_t vi${M}x0123 = vzero;
+
+    $for M in range(2 + ROW_TILE):
+      v128_t vi${M}x4567 = wasm_v128_load(i${M}); i${M} += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x89AB = wasm_v128_load(i${M}); i${M} += 4;
+
+      $for M in range(ROW_TILE):
+        vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS >= 2:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS >= 3:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS >= 4:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
+        $else:
+          vo${M}p${3 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${3 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(2 + ROW_TILE):
+        vi${M}x0123 = vi${M}x4567;
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      $for M in range(2 + ROW_TILE):
+        vi${M}x4567 = vi${M}x89AB;
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(2 + ROW_TILE):
+        vi${M}x4567 = wasm_v128_and(vmask, vi${M}x4567);
+
+      $for M in range(ROW_TILE):
+        vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS >= 2:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS >= 3:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS >= 4:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
+        $else:
+          vo${M}p${3 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${3 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(2 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vzero, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        $for M in reversed(range(ROW_TILE)):
+          wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0); o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0); o${M} += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i${ROW_TILE} - input_decrement);
+    i1 = (const float*) ((uintptr_t) i${ROW_TILE+1} - input_decrement);
+    $for M in range(2, 2 + ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+  } while (${"--" if ROW_TILE == 1 else ""}output_height != 0);
+}
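The splat template above differs from the loadsplat one only in when the filter taps are broadcast: loadsplat materializes vk00..vk22 once outside the pixel loop, while splat re-broadcasts the needed lane of vw0123/vw4567/vw89 at every use, trading a shuffle per multiply for lower register pressure. A minimal sketch of the same multiply-add (row 0, tap k11) as each template emits it, assuming ROW_TILE=1 and ACCUMULATORS=1:

// loadsplat: tap k11 comes from the pre-broadcast register vk11.
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
// splat: the same tap is broadcast from vw4567 at the point of use.
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));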
diff --git a/src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in b/src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
deleted file mode 100644
index 477a6ab..0000000
--- a/src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-$assert ROW_TILE >= 1
-$assert ACCUMULATORS >= 1
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-$ARCH_SUFFIX = "_x86" if X86 else "_arm"
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd${ARCH_SUFFIX}_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  $for M in range(2, 2 + ROW_TILE):
-    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
-
-  float* o0 = output;
-  $for M in range(1, ROW_TILE):
-    float* o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
-
-  size_t output_height = input_height;
-  do {
-    $for M in range(2, 2 + ROW_TILE):
-      if XNN_UNPREDICTABLE(output_height < ${M}) {
-        i${M} = zero;
-        $if M <= ROW_TILE:
-          o${M-1} = o${M-2};
-      }
-
-    $for M in range(2 + ROW_TILE):
-      v128_t vi${M}x0123 = vzero;
-
-    $for M in range(2 + ROW_TILE):
-      v128_t vi${M}x4567 = wasm_v128_load(i${M});
-      i${M} += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      $for M in range(2 + ROW_TILE):
-        const v128_t vi${M}x89AB = wasm_v128_load(i${M});
-        i${M} += 4;
-
-      $for K in range(3):
-        $for M in range(ROW_TILE):
-          $if K == 0:
-            v128_t vo${M}p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi${M+K}x4567, vk${K}1));
-          $elif K < ACCUMULATORS:
-            v128_t vo${M}p${K} = wasm_f32x4_mul(vi${M+K}x4567, vk${K}1);
-          $else:
-            vo${M}p${K % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${K % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x4567, vk${K}1));
-
-      $for M in range(2 + ROW_TILE):
-        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
-
-      $for K in range(3):
-        $for M in range(ROW_TILE):
-          $if K+3 < ACCUMULATORS:
-            v128_t vo${M}p${K+3} = wasm_f32x4_mul(vi${M+K}x3456, vk${K}0);
-          $else:
-            vo${M}p${(K+3) % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${(K+3) % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x3456, vk${K}0));
-
-      $for M in range(2 + ROW_TILE):
-        vi${M}x0123 = vi${M}x4567;
-
-      $for M in range(2 + ROW_TILE):
-        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 1, 2, 3, 4);
-
-      $for K in range(3):
-        $for M in range(ROW_TILE):
-          vo${M}p${(K+6) % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${(K+6) % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x5678, vk${K}2));
-
-      $for M in range(2 + ROW_TILE):
-        vi${M}x4567 = vi${M}x89AB;
-
-      $if ACCUMULATORS > 1:
-        $ACC_SLICE = 1
-        $while ACC_SLICE < ACCUMULATORS:
-          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
-            $if A + ACC_SLICE < ACCUMULATORS:
-              $for M in range(ROW_TILE):
-                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
-          $ACC_SLICE *= 2
-
-      $if X86:
-        $for M in range(ROW_TILE):
-          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
-        $for M in range(ROW_TILE):
-          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
-      $else:
-        $for M in range(ROW_TILE):
-          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
-        $for M in range(ROW_TILE):
-          vo${M} = wasm_f32x4_min(vo${M}, vmax);
-
-      $for M in reversed(range(ROW_TILE)):
-        wasm_v128_store(o${M}, vo${M});
-        o${M} += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      $for M in range(2 + ROW_TILE):
-        vi${M}x4567 = wasm_v128_and(vmask, vi${M}x4567);
-
-      $for K in range(3):
-        $for M in range(ROW_TILE):
-          $if K == 0:
-            v128_t vo${M}p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi${M+K}x4567, vk${K}1));
-          $elif K < ACCUMULATORS:
-            v128_t vo${M}p${K} = wasm_f32x4_mul(vi${M+K}x4567, vk${K}1);
-          $else:
-            vo${M}p${K % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${K % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x4567, vk${K}1));
-
-      $for M in range(2 + ROW_TILE):
-        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
-
-      $for K in range(3):
-        $for M in range(ROW_TILE):
-          $if K+3 < ACCUMULATORS:
-            v128_t vo${M}p${K+3} = wasm_f32x4_mul(vi${M+K}x3456, vk${K}0);
-          $else:
-            vo${M}p${(K+3) % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${(K+3) % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x3456, vk${K}0));
-
-      $for M in range(2 + ROW_TILE):
-        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vzero, 1, 2, 3, 4);
-
-      $for K in range(3):
-        $for M in range(ROW_TILE):
-          vo${M}p${(K+6) % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${(K+6) % ACCUMULATORS}, wasm_f32x4_mul(vi${M+K}x5678, vk${K}2));
-
-      $if ACCUMULATORS > 1:
-        $ACC_SLICE = 1
-        $while ACC_SLICE < ACCUMULATORS:
-          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
-            $if A + ACC_SLICE < ACCUMULATORS:
-              $for M in range(ROW_TILE):
-                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
-          $ACC_SLICE *= 2
-
-      $if X86:
-        $for M in range(ROW_TILE):
-          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
-        $for M in range(ROW_TILE):
-          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
-      $else:
-        $for M in range(ROW_TILE):
-          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
-        $for M in range(ROW_TILE):
-          vo${M} = wasm_f32x4_min(vo${M}, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        $for M in reversed(range(ROW_TILE)):
-          wasm_v128_store(o${M}, vo${M});
-          o${M} += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          $for M in reversed(range(ROW_TILE)):
-            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0);
-            o${M} += 2;
-
-          $for M in range(ROW_TILE):
-            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          $for M in reversed(range(ROW_TILE)):
-            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0);
-            o${M} += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i${ROW_TILE} - input_decrement);
-    i1 = (const float*) ((uintptr_t) i${ROW_TILE+1} - input_decrement);
-    $for M in range(2, 2 + ROW_TILE):
-      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
-
-    $if ROW_TILE > 1:
-      o0 = o${ROW_TILE - 1};
-      $for M in range(1, ROW_TILE):
-        o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
-
-    $if ROW_TILE > 1:
-      output_height = doz(output_height, ${ROW_TILE});
-  } while (${"--" if ROW_TILE == 1 else ""}output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-1x4-acc3.c.in b/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-1x4-acc3.c.in
deleted file mode 100644
index 882c693..0000000
--- a/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-1x4-acc3.c.in
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-$ARCH_SUFFIX = "_x86" if X86 else "_arm"
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd${ARCH_SUFFIX}_1x4_acc3(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top >= 0);
-  assert(padding_top <= 1);
-
-  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
-  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
-
-  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
-  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
-  if XNN_UNPREDICTABLE(padding_top != 0) {
-    i0 = zero;
-  }
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
-  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
-  do {
-    if XNN_UNPREDICTABLE(padded_input_height <= 3) {
-      i2 = zero;
-    }
-
-    v128_t vi0x7531 = vzero;
-    v128_t vi1x7531 = vzero;
-    v128_t vi2x7531 = vzero;
-
-    size_t w = input_width;
-    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
-      v128_t vo8ACEp0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      i0 += 8;
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      i1 += 8;
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 4 + 1, 4 + 3);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x8ACE, vk01));
-      v128_t vo8ACEp1 = wasm_f32x4_mul(vi1x8ACE, vk11);
-      v128_t vo8ACEp2 = wasm_f32x4_mul(vi2x8ACE, vk21);
-
-      const v128_t vi0xF9BD = wasm_v32x4_shuffle(vi0x9BDF, vi0x9BDF, 3, 0, 1, 2);
-      const v128_t vi1xF9BD = wasm_v32x4_shuffle(vi1x9BDF, vi1x9BDF, 3, 0, 1, 2);
-      const v128_t vi2xF9BD = wasm_v32x4_shuffle(vi2x9BDF, vi2x9BDF, 3, 0, 1, 2);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x9BDF, vk02));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x9BDF, vk12));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x9BDF, vk22));
-
-      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0xF9BD, vi0x7531, 4, 1, 2, 3);
-      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1xF9BD, vi1x7531, 4, 1, 2, 3);
-      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2xF9BD, vi2x7531, 4, 1, 2, 3);
-
-      vi0x7531 = vi0xF9BD;
-      vi1x7531 = vi1xF9BD;
-      vi2x7531 = vi2xF9BD;
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x7BDF, vk00));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x7BDF, vk10));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x7BDF, vk20));
-
-      v128_t vo = wasm_f32x4_add(vo8ACEp0, vo8ACEp1);
-      vo = wasm_f32x4_add(vo, vo8ACEp2);
-
-      $if X86:
-        vo = wasm_v128_bitselect(vmin, vo, wasm_f32x4_lt(vo, vmin));
-        vo = wasm_v128_bitselect(vo, vmax, wasm_f32x4_le(vo, vmax));
-      $else:
-        vo = wasm_f32x4_max(vo, vmin);
-        vo = wasm_f32x4_min(vo, vmax);
-
-      wasm_v128_store(output, vo);
-      output += 4;
-    }
-    // Potentially process the last block of 0..7 pixels.
-    assert(w < 8 * sizeof(float));
-    if XNN_LIKELY(w != 0) {
-      v128_t vo8ACEp0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-
-      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 4 + 1, 4 + 3));
-      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 4 + 1, 4 + 3));
-      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 4 + 1, 4 + 3));
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x8ACE, vk01));
-      v128_t vo8ACEp1 = wasm_f32x4_mul(vi1x8ACE, vk11);
-      v128_t vo8ACEp2 = wasm_f32x4_mul(vi2x8ACE, vk21);
-
-      const v128_t vi0xF9BD = wasm_v32x4_shuffle(vi0x9BDF, vi0x9BDF, 3, 0, 1, 2);
-      const v128_t vi1xF9BD = wasm_v32x4_shuffle(vi1x9BDF, vi1x9BDF, 3, 0, 1, 2);
-      const v128_t vi2xF9BD = wasm_v32x4_shuffle(vi2x9BDF, vi2x9BDF, 3, 0, 1, 2);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x9BDF, vk02));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x9BDF, vk12));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x9BDF, vk22));
-
-      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0xF9BD, vi0x7531, 4, 1, 2, 3);
-      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1xF9BD, vi1x7531, 4, 1, 2, 3);
-      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2xF9BD, vi2x7531, 4, 1, 2, 3);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x7BDF, vk00));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x7BDF, vk10));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x7BDF, vk20));
-
-      v128_t vo = wasm_f32x4_add(vo8ACEp0, vo8ACEp1);
-      vo = wasm_f32x4_add(vo, vo8ACEp2);
-
-      $if X86:
-        vo = wasm_v128_bitselect(vmin, vo, wasm_f32x4_lt(vo, vmin));
-        vo = wasm_v128_bitselect(vo, vmax, wasm_f32x4_le(vo, vmax));
-      $else:
-        vo = wasm_f32x4_max(vo, vmin);
-        vo = wasm_f32x4_min(vo, vmax);
-
-      if (w == 7 * sizeof(float)) {
-        wasm_v128_store(output, vo);
-        output += 4;
-      } else {
-        w += 1 * sizeof(float);
-        if (w & (4 * sizeof(float))) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo, 0);
-          output += 2;
-          vo = wasm_v32x4_shuffle(vo, vo, 2, 3, 0, 1);
-        }
-        if (w & (2 * sizeof(float))) {
-          *output = wasm_f32x4_extract_lane(vo, 0);
-          output += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i0 + input_width);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-    output_height -= 1;
-    padded_input_height -= 2;
-  } while (output_height != 0);
-}
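As with the padding-only kernels above, the deleted stride-2 (3x3s2p1) 1x4-acc3 template is replaced below by a ROW_TILE/ACCUMULATORS-parameterized loadsplat variant. The stride-2 kernels first split eight consecutive input columns into even and odd lanes, since with subsampling 2 the even columns line up with the centre taps (k01/k11/k21) and the odd columns with the side taps. A minimal sketch of that de-interleave for one input row, using the same intrinsics as the templates:

const v128_t vi0x89AB = wasm_v128_load(i0);      // columns 8 9 A B
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  // columns C D E F
i0 += 8;
// Even columns feed the centre taps, odd columns the side taps.
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);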
diff --git a/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in b/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
new file mode 100644
index 0000000..49eb9d6
--- /dev/null
+++ b/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
@@ -0,0 +1,287 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+$ARCH_SUFFIX = "_x86" if X86 else "_arm"
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd${ARCH_SUFFIX}_loadsplat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  $if ROW_TILE > 1:
+    const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  $for M in range(2, 1 + 2 * ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    $for M in range(2, 1 + 2 * ROW_TILE):
+      if XNN_UNPREDICTABLE(padded_input_height < ${2 + M}) {
+        i${M} = zero;
+        $if M % 2 == 1:
+          o${(M - 1) / 2} = o${(M - 1) / 2 - 1};
+      }
+
+    $for M in range(1 + 2 * ROW_TILE):
+      v128_t vi${M}x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = vbias;
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x89AB = wasm_v128_load(i${M});
+        const v128_t vi${M}xCDEF = wasm_v128_load(i${M} + 4);
+        i${M} += 8;
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x8ACE = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 0, 2, 4, 6);
+        const v128_t vi${M}x9BDF = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 1, 3, 5, 7);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, vk01);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, vk01));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, vk11);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, vk11));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, vk21);
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, vk21));
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x7BDF = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
+        vi${M}x1357 = vi${M}x9BDF;
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M}x7BDF, vk00);
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x7BDF, vk00));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 5:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${2*M+1}x7BDF, vk10);
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x7BDF, vk10));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p6 = wasm_f32x4_mul(vi${2*M+2}x7BDF, vk11);
+        $else:
+          vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x7BDF, vk20));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9BDF, vk02));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x9BDF, vk12));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x9BDF, vk22));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = vbias;
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x89AB = wasm_v128_load(i${M});
+        const v128_t vi${M}xCDEF = wasm_v128_load(i${M} + 4);
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 0, 2, 4, 6));
+        const v128_t vi${M}x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 1, 3, 5, 7));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, vk01);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, vk01));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, vk11);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, vk11));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, vk21);
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, vk21));
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x7BDF = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M}x7BDF, vk00);
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x7BDF, vk00));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 5:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${2*M+1}x7BDF, vk10);
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x7BDF, vk10));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p6 = wasm_f32x4_mul(vi${2*M+2}x7BDF, vk11);
+        $else:
+          vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x7BDF, vk20));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9BDF, vk02));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x9BDF, vk12));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x9BDF, vk22));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        $for M in reversed(range(ROW_TILE)):
+          wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0); o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0); o${M} += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i${2 * ROW_TILE} - input_decrement);
+    $for M in range(1, 1 + 2 * ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+      padded_input_height = doz(padded_input_height, ${ROW_TILE * 2});
+    $else:
+      output_height -= 1;
+      padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in b/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
new file mode 100644
index 0000000..5fbd635
--- /dev/null
+++ b/src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
@@ -0,0 +1,277 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+$ARCH_SUFFIX = "_x86" if X86 else "_arm"
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd${ARCH_SUFFIX}_splat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  $if ROW_TILE > 1:
+    const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  $for M in range(2, 1 + 2 * ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    $for M in range(2, 1 + 2 * ROW_TILE):
+      if XNN_UNPREDICTABLE(padded_input_height < ${2 + M}) {
+        i${M} = zero;
+        $if M % 2 == 1:
+          o${(M - 1) / 2} = o${(M - 1) / 2 - 1};
+      }
+
+    $for M in range(1 + 2 * ROW_TILE):
+      v128_t vi${M}x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x89AB = wasm_v128_load(i${M});
+        const v128_t vi${M}xCDEF = wasm_v128_load(i${M} + 4);
+        i${M} += 8;
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x8ACE = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 0, 2, 4, 6);
+        const v128_t vi${M}x9BDF = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 1, 3, 5, 7);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x7BDF = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
+        vi${M}x1357 = vi${M}x9BDF;
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M}x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 5:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${2*M+1}x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0));
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p6 = wasm_f32x4_mul(vi${2*M+2}x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+        $else:
+          vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x89AB = wasm_v128_load(i${M});
+        const v128_t vi${M}xCDEF = wasm_v128_load(i${M} + 4);
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 0, 2, 4, 6));
+        const v128_t vi${M}x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 1, 3, 5, 7));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      $for M in range(1 + 2 * ROW_TILE):
+        const v128_t vi${M}x7BDF = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M}x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 5:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${2*M+1}x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0));
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p6 = wasm_f32x4_mul(vi${2*M+2}x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+        $else:
+          vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        $for M in reversed(range(ROW_TILE)):
+          wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0); o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0); o${M} += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i${2 * ROW_TILE} - input_decrement);
+    $for M in range(1, 1 + 2 * ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+      padded_input_height = doz(padded_input_height, ${ROW_TILE * 2});
+    $else:
+      output_height -= 1;
+      padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/5x5p2-wasmsimd-3x4.c.in b/src/f32-dwconv2d-chw/5x5p2-wasmsimd-3x4.c.in
deleted file mode 100644
index 3be96e9..0000000
--- a/src/f32-dwconv2d-chw/5x5p2-wasmsimd-3x4.c.in
+++ /dev/null
@@ -1,695 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-$ARCH_SUFFIX = "_x86" if X86 else "_arm"
-
-void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd${ARCH_SUFFIX}_3x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float *zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 2);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
-  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
-  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
-  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
-  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
-  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
-  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
-  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
-  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
-  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
-  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
-  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
-  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
-  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = zero;
-  const float* i2 = input;
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i3 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height <= 2) {
-      i4 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i5 = zero;
-    }
-    if XNN_UNPREDICTABLE(output_height <= 4) {
-      i6 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-    v128_t vi6x0123 = vzero;
-    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
-    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
-
-    size_t w = input_width;
-    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
-      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      $if X86:
-        vo0 = wasm_v128_bitselect(vmin, vo0, wasm_f32x4_lt(vo0, vmin));
-        vo1 = wasm_v128_bitselect(vmin, vo1, wasm_f32x4_lt(vo1, vmin));
-        vo2 = wasm_v128_bitselect(vmin, vo2, wasm_f32x4_lt(vo2, vmin));
-        vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-        vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-        vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-      $else:
-        vo0 = wasm_f32x4_max(vo0, vmin);
-        vo1 = wasm_f32x4_max(vo1, vmin);
-        vo2 = wasm_f32x4_max(vo2, vmin);
-        vo0 = wasm_f32x4_min(vo0, vmax);
-        vo1 = wasm_f32x4_min(vo1, vmax);
-        vo2 = wasm_f32x4_min(vo2, vmax);
-
-      wasm_v128_store(o2, vo2); o2 += 4;
-      wasm_v128_store(o1, vo1); o1 += 4;
-      wasm_v128_store(o0, vo0); o0 += 4;
-    }
-    // Always process the last block of 5..8 pixels.
-    if XNN_LIKELY(w > 4 * sizeof(float)) {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
-      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
-      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
-      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
-      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
-      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
-      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
-
-      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
-      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
-      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
-      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
-      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
-      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
-      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      $if X86:
-        vo0 = wasm_v128_bitselect(vmin, vo0, wasm_f32x4_lt(vo0, vmin));
-        vo1 = wasm_v128_bitselect(vmin, vo1, wasm_f32x4_lt(vo1, vmin));
-        vo2 = wasm_v128_bitselect(vmin, vo2, wasm_f32x4_lt(vo2, vmin));
-        vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-        vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-        vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-      $else:
-        vo0 = wasm_f32x4_max(vo0, vmin);
-        vo1 = wasm_f32x4_max(vo1, vmin);
-        vo2 = wasm_f32x4_max(vo2, vmin);
-        vo0 = wasm_f32x4_min(vo0, vmax);
-        vo1 = wasm_f32x4_min(vo1, vmax);
-        vo2 = wasm_f32x4_min(vo2, vmax);
-
-      wasm_v128_store(o2, vo2); o2 += 4;
-      wasm_v128_store(o1, vo1); o1 += 4;
-      wasm_v128_store(o0, vo0); o0 += 4;
-      w -= 4 * sizeof(float);
-    }
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      // This might have already happened if there are more than 4 pixels, but we can't count on it.
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vzero, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      vo0 = wasm_f32x4_max(vo0, vmin);
-      vo1 = wasm_f32x4_max(vo1, vmin);
-      vo2 = wasm_f32x4_max(vo2, vmin);
-
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-
-      if XNN_LIKELY(w & (4 * sizeof(float))) {
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-    i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-    o0 = o2;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-
-    output_height = doz(output_height, 3);
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in b/src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
new file mode 100644
index 0000000..64c1411
--- /dev/null
+++ b/src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
@@ -0,0 +1,537 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+$ARCH_SUFFIX = "_x86" if X86 else "_arm"
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd${ARCH_SUFFIX}_loadsplat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  $for M in range(3, 4 + ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+  size_t output_height = input_height;
+  do {
+    $for M in range(2, 3 + ROW_TILE):
+      if XNN_UNPREDICTABLE(output_height < ${M}) {
+        i${M+1} = zero;
+        $if M <= ROW_TILE:
+          o${M-1} = o${M-2};
+      }
+
+    $for M in range(4 + ROW_TILE):
+      v128_t vi${M}x0123 = vzero;
+
+    $for M in range(4 + ROW_TILE):
+      v128_t vi${M}x4567 = wasm_v128_load(i${M}); i${M} += 4;
+
+    size_t w = input_width;
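+    // Main loop: while more than 8 input columns remain, compute 4 output pixels for each row of the tile per iteration.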
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = vbias;
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x89AB = wasm_v128_load(i${M}); i${M} += 4;
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${M}x4567, vk02);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, vk02));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+1}x4567, vk12);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, vk12));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${M+2}x4567, vk22);
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x4567, vk22));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${M+3}x4567, vk32);
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x4567, vk32));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${M+4}x4567, vk42);
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x4567, vk42));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, vk01));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, vk11));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, vk21));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x3456, vk31));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x3456, vk41));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x2345 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 2, 3, 4, 5);
+        vi${M}x0123 = vi${M}x4567;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x2345, vk00));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x2345, vk10));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x2345, vk20));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x2345, vk30));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x2345, vk40));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, vk03));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, vk13));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, vk23));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x5678, vk33));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x5678, vk43));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x6789 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 2, 3, 4, 5);
+        vi${M}x4567 = vi${M}x89AB;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x6789, vk04));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x6789, vk14));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x6789, vk24));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x6789, vk34));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x6789, vk44));
+
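+      // When more than one partial accumulator is in use, sum the partials pairwise into the p0 accumulator.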
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = vbias;
+
+      $for M in range(4 + ROW_TILE):
+        v128_t vi${M}x89AB = wasm_v128_load(i${M}); i${M} += 4;
+
+      $for M in range(4 + ROW_TILE):
+        vi${M}x89AB = wasm_v128_and(vmask, vi${M}x89AB);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${M}x4567, vk02);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, vk02));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+1}x4567, vk12);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, vk12));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${M+2}x4567, vk22);
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x4567, vk22));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${M+3}x4567, vk32);
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x4567, vk32));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${M+4}x4567, vk42);
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x4567, vk42));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, vk01));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, vk11));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, vk21));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x3456, vk31));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x3456, vk41));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x2345 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 2, 3, 4, 5);
+        vi${M}x0123 = vi${M}x4567;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x2345, vk00));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x2345, vk10));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x2345, vk20));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x2345, vk30));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x2345, vk40));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, vk03));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, vk13));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, vk23));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x5678, vk33));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x5678, vk43));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x6789 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 2, 3, 4, 5);
+        vi${M}x4567 = vi${M}x89AB;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x6789, vk04));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x6789, vk14));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x6789, vk24));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x6789, vk34));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x6789, vk44));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+
+      w -= 4 * sizeof(float);
+    }
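+    // Final block of 1..4 pixels: vmask zeroes the lanes past the right edge of the row.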
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = vbias;
+
+      $for M in range(4 + ROW_TILE):
+        vi${M}x4567 = wasm_v128_and(vmask, vi${M}x4567);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${M}x4567, vk02);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, vk02));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+1}x4567, vk12);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, vk12));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${M+2}x4567, vk22);
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x4567, vk22));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${M+3}x4567, vk32);
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x4567, vk32));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${M+4}x4567, vk42);
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x4567, vk42));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, vk01));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, vk11));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, vk21));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x3456, vk31));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x3456, vk41));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x2345 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 2, 3, 4, 5);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x2345, vk00));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x2345, vk10));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x2345, vk20));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x2345, vk30));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x2345, vk40));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vzero, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, vk03));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, vk13));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, vk23));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x5678, vk33));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x5678, vk43));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x6789 = wasm_v32x4_shuffle(vi${M}x5678, vzero, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x6789, vk04));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x6789, vk14));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x6789, vk24));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x6789, vk34));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x6789, vk44));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        $for M in reversed(range(ROW_TILE)):
+          wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0); o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0); o${M} += 1;
+        }
+      }
+    }
+
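+    // Rebase the input and output row pointers for the next tile of output rows.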
+    i0 = (const float*) ((uintptr_t) i${ROW_TILE} - input_decrement);
+    i1 = (const float*) ((uintptr_t) i${ROW_TILE+1} - input_decrement);
+    $for M in range(2, 4 + ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+  } while (${"--" if ROW_TILE == 1 else ""}output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in b/src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
new file mode 100644
index 0000000..e24b509
--- /dev/null
+++ b/src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
@@ -0,0 +1,511 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+$ARCH_SUFFIX = "_x86" if X86 else "_arm"
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd${ARCH_SUFFIX}_splat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  $for M in range(3, 4 + ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+  size_t output_height = input_height;
+  do {
+    $for M in range(2, 3 + ROW_TILE):
+      if XNN_UNPREDICTABLE(output_height < ${M}) {
+        i${M+1} = zero;
+        $if M <= ROW_TILE:
+          o${M-1} = o${M-2};
+      }
+
+    $for M in range(4 + ROW_TILE):
+      v128_t vi${M}x0123 = vzero;
+
+    $for M in range(4 + ROW_TILE):
+      v128_t vi${M}x4567 = wasm_v128_load(i${M}); i${M} += 4;
+
+    size_t w = input_width;
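+    // Main loop over full 4-pixel output blocks; kernel taps are splatted from the packed weight vectors at each use.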
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x89AB = wasm_v128_load(i${M}); i${M} += 4;
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${M+3}x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${M+4}x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3));
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x2345 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 2, 3, 4, 5);
+        vi${M}x0123 = vi${M}x4567;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x6789 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 2, 3, 4, 5);
+        vi${M}x4567 = vi${M}x89AB;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(4 + ROW_TILE):
+        v128_t vi${M}x89AB = wasm_v128_load(i${M}); i${M} += 4;
+
+      $for M in range(4 + ROW_TILE):
+        vi${M}x89AB = wasm_v128_and(vmask, vi${M}x89AB);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${M+3}x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${M+4}x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3));
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x2345 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 2, 3, 4, 5);
+        vi${M}x0123 = vi${M}x4567;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x6789 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 2, 3, 4, 5);
+        vi${M}x4567 = vi${M}x89AB;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+
+      w -= 4 * sizeof(float);
+    }
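+    // Final block of 1..4 remaining pixels; lanes beyond the valid width are zeroed with vmask.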
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(4 + ROW_TILE):
+        vi${M}x4567 = wasm_v128_and(vmask, vi${M}x4567);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${M+3}x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 6:
+          v128_t vo${M}p5 = wasm_f32x4_mul(vi${M+4}x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3));
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x2345 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 2, 3, 4, 5);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vzero, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      $for M in range(4 + ROW_TILE):
+        const v128_t vi${M}x6789 = wasm_v32x4_shuffle(vi${M}x5678, vzero, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+3}x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+4}x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        $for M in reversed(range(ROW_TILE)):
+          wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0); o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          $for M in reversed(range(ROW_TILE)):
+            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0); o${M} += 1;
+        }
+      }
+    }
+
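+    // Advance the row pointers to the next tile of output rows.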
+    i0 = (const float*) ((uintptr_t) i${ROW_TILE} - input_decrement);
+    i1 = (const float*) ((uintptr_t) i${ROW_TILE+1} - input_decrement);
+    $for M in range(2, 4 + ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+  } while (${"--" if ROW_TILE == 1 else ""}output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-1x4-acc2.c.in b/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-1x4-acc2.c.in
deleted file mode 100644
index 8e8df67..0000000
--- a/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-1x4-acc2.c.in
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-$ARCH_SUFFIX = "_x86" if X86 else "_arm"
-
-void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd${ARCH_SUFFIX}_1x4_acc2(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top >= 1);
-  assert(padding_top <= 2);
-
-  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
-  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
-  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
-  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
-  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
-  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
-  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
-  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
-  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
-  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
-  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
-  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
-  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
-  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
-  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const uint32_t padding_top_less_1 = padding_top - 1;
-  const size_t input_decrement = round_down_po2(input_width - 1 * sizeof(float), 4 * sizeof(float)) + 4 * sizeof(float);
-
-  const float* i0 = zero;
-  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
-    i1 = zero;
-  }
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
-  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
-  do {
-    if XNN_UNPREDICTABLE(padded_input_height <= 6) {
-      i4 = zero;
-    }
-    if XNN_UNPREDICTABLE(padded_input_height < 6) {
-      i3 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-
-    size_t w = input_width;
-    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
-      v128_t vo468Ap0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      i0 += 8;
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      i1 += 8;
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-      i2 += 8;
-      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
-      i3 += 8;
-      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
-      i4 += 8;
-
-      const v128_t vi0x468A = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi0x579B = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi1x468A = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi1x579B = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi2x468A = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi2x579B = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi3x468A = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi3x579B = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi4x468A = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi4x579B = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 3, 4 + 1, 4 + 3);
-
-      // middle tap
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x468A, vk02));
-      v128_t vo468Ap1 = wasm_f32x4_mul(vi1x468A, vk12);
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x468A, vk22));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x468A, vk32));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x468A, vk42));
-
-      // one left
-      const v128_t vi0x3579 = wasm_v32x4_shuffle(vi0x0123, vi0x579B, 3, 4, 5, 6);
-      const v128_t vi1x3579 = wasm_v32x4_shuffle(vi1x0123, vi1x579B, 3, 4, 5, 6);
-      const v128_t vi2x3579 = wasm_v32x4_shuffle(vi2x0123, vi2x579B, 3, 4, 5, 6);
-      const v128_t vi3x3579 = wasm_v32x4_shuffle(vi3x0123, vi3x579B, 3, 4, 5, 6);
-      const v128_t vi4x3579 = wasm_v32x4_shuffle(vi4x0123, vi4x579B, 3, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x3579, vk01));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x3579, vk11));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x3579, vk21));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x3579, vk31));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x3579, vk41));
-
-      // two left
-      const v128_t vi0x2468 = wasm_v32x4_shuffle(vi0x0123, vi0x468A, 2, 4, 5, 6);
-      const v128_t vi1x2468 = wasm_v32x4_shuffle(vi1x0123, vi1x468A, 2, 4, 5, 6);
-      const v128_t vi2x2468 = wasm_v32x4_shuffle(vi2x0123, vi2x468A, 2, 4, 5, 6);
-      const v128_t vi3x2468 = wasm_v32x4_shuffle(vi3x0123, vi3x468A, 2, 4, 5, 6);
-      const v128_t vi4x2468 = wasm_v32x4_shuffle(vi4x0123, vi4x468A, 2, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x2468, vk00));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x2468, vk10));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x2468, vk20));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x2468, vk30));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x2468, vk40));
-
-      vi0x0123 = vi0x89AB;
-      vi1x0123 = vi1x89AB;
-      vi2x0123 = vi2x89AB;
-      vi3x0123 = vi3x89AB;
-      vi4x0123 = vi4x89AB;
-
-      // one right
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x579B, vk03));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x579B, vk13));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x579B, vk23));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x579B, vk33));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x579B, vk43));
-
-      // two right
-      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x468A, vi0xCDEF, 1, 2, 3, 4);
-      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x468A, vi1xCDEF, 1, 2, 3, 4);
-      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x468A, vi2xCDEF, 1, 2, 3, 4);
-      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x468A, vi3xCDEF, 1, 2, 3, 4);
-      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x468A, vi4xCDEF, 1, 2, 3, 4);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x68AC, vk04));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x68AC, vk14));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x68AC, vk24));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x68AC, vk34));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x68AC, vk44));
-
-      vi0x4567 = vi0xCDEF;
-      vi1x4567 = vi1xCDEF;
-      vi2x4567 = vi2xCDEF;
-      vi3x4567 = vi3xCDEF;
-      vi4x4567 = vi4xCDEF;
-
-      v128_t vo0 = wasm_f32x4_add(vo468Ap0, vo468Ap1);
-
-      $if X86:
-        vo0 = wasm_v128_bitselect(vmin, vo0, wasm_f32x4_lt(vo0, vmin));
-        vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      $else:
-        vo0 = wasm_f32x4_max(vo0, vmin);
-        vo0 = wasm_f32x4_min(vo0, vmax);
-
-      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
-      if XNN_LIKELY(w_tmp >= 4) {
-        wasm_v128_store(output, vo0);
-        output += 4;
-      } else {
-        if (w_tmp & 2) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo0, 0);
-          output += 2;
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w_tmp & 1) {
-          *output = wasm_f32x4_extract_lane(vo0, 0);
-          output += 1;
-        }
-      }
-    }
-
-    {
-      v128_t vo468Ap0 = vbias;
-
-      v128_t vi0x89AB = vzero;
-      v128_t vi1x89AB = vzero;
-      v128_t vi2x89AB = vzero;
-      v128_t vi3x89AB = vzero;
-      v128_t vi4x89AB = vzero;
-      if XNN_LIKELY(w > 4 * sizeof(float)) {
-        vi0x89AB = wasm_v128_load(i0);
-        i0 += 4;
-        vi1x89AB = wasm_v128_load(i1);
-        i1 += 4;
-        vi2x89AB = wasm_v128_load(i2);
-        i2 += 4;
-        vi3x89AB = wasm_v128_load(i3);
-        i3 += 4;
-        vi4x89AB = wasm_v128_load(i4);
-        i4 += 4;
-      }
-
-      v128_t vi0xCDEF = vzero;
-      v128_t vi1xCDEF = vzero;
-      v128_t vi2xCDEF = vzero;
-      v128_t vi3xCDEF = vzero;
-      v128_t vi4xCDEF = vzero;
-      if XNN_LIKELY(w > 8 * sizeof(float)) {
-        vi0xCDEF = wasm_v128_load(i0);
-        i0 += 4;
-        vi1xCDEF = wasm_v128_load(i1);
-        i1 += 4;
-        vi2xCDEF = wasm_v128_load(i2);
-        i2 += 4;
-        vi3xCDEF = wasm_v128_load(i3);
-        i3 += 4;
-        vi4xCDEF = wasm_v128_load(i4);
-        i4 += 4;
-      }
-
-      v128_t vi0x468A = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi0x579B = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi1x468A = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi1x579B = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi2x468A = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi2x579B = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi3x468A = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi3x579B = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi4x468A = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi4x579B = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 3, 4 + 1, 4 + 3);
-
-      vi0x468A = wasm_v128_and(vmask_even, vi0x468A);
-      vi1x468A = wasm_v128_and(vmask_even, vi1x468A);
-      vi2x468A = wasm_v128_and(vmask_even, vi2x468A);
-      vi3x468A = wasm_v128_and(vmask_even, vi3x468A);
-      vi4x468A = wasm_v128_and(vmask_even, vi4x468A);
-
-      vi0x579B = wasm_v128_and(vmask_odd, vi0x579B);
-      vi1x579B = wasm_v128_and(vmask_odd, vi1x579B);
-      vi2x579B = wasm_v128_and(vmask_odd, vi2x579B);
-      vi3x579B = wasm_v128_and(vmask_odd, vi3x579B);
-      vi4x579B = wasm_v128_and(vmask_odd, vi4x579B);
-
-      // middle tap
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x468A, vk02));
-      v128_t vo468Ap1 = wasm_f32x4_mul(vi1x468A, vk12);
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x468A, vk22));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x468A, vk32));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x468A, vk42));
-
-      // one left
-      const v128_t vi0x3579 = wasm_v32x4_shuffle(vi0x0123, vi0x579B, 3, 4, 5, 6);
-      const v128_t vi1x3579 = wasm_v32x4_shuffle(vi1x0123, vi1x579B, 3, 4, 5, 6);
-      const v128_t vi2x3579 = wasm_v32x4_shuffle(vi2x0123, vi2x579B, 3, 4, 5, 6);
-      const v128_t vi3x3579 = wasm_v32x4_shuffle(vi3x0123, vi3x579B, 3, 4, 5, 6);
-      const v128_t vi4x3579 = wasm_v32x4_shuffle(vi4x0123, vi4x579B, 3, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x3579, vk01));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x3579, vk11));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x3579, vk21));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x3579, vk31));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x3579, vk41));
-
-      // two left
-      const v128_t vi0x2468 = wasm_v32x4_shuffle(vi0x0123, vi0x468A, 2, 4, 5, 6);
-      const v128_t vi1x2468 = wasm_v32x4_shuffle(vi1x0123, vi1x468A, 2, 4, 5, 6);
-      const v128_t vi2x2468 = wasm_v32x4_shuffle(vi2x0123, vi2x468A, 2, 4, 5, 6);
-      const v128_t vi3x2468 = wasm_v32x4_shuffle(vi3x0123, vi3x468A, 2, 4, 5, 6);
-      const v128_t vi4x2468 = wasm_v32x4_shuffle(vi4x0123, vi4x468A, 2, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x2468, vk00));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x2468, vk10));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x2468, vk20));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x2468, vk30));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x2468, vk40));
-
-      vi0x0123 = vi0x89AB;
-      vi1x0123 = vi1x89AB;
-      vi2x0123 = vi2x89AB;
-      vi3x0123 = vi3x89AB;
-      vi4x0123 = vi4x89AB;
-
-      // one right
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x579B, vk03));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x579B, vk13));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x579B, vk23));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x579B, vk33));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x579B, vk43));
-
-      // two right
-      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x468A, vi0xCDEF, 1, 2, 3, 4);
-      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x468A, vi1xCDEF, 1, 2, 3, 4);
-      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x468A, vi2xCDEF, 1, 2, 3, 4);
-      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x468A, vi3xCDEF, 1, 2, 3, 4);
-      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x468A, vi4xCDEF, 1, 2, 3, 4);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x68AC, vk04));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x68AC, vk14));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x68AC, vk24));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x68AC, vk34));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x68AC, vk44));
-
-      vi0x4567 = vi0xCDEF;
-      vi1x4567 = vi1xCDEF;
-      vi2x4567 = vi2xCDEF;
-      vi3x4567 = vi3xCDEF;
-      vi4x4567 = vi4xCDEF;
-
-      v128_t vo0 = wasm_f32x4_add(vo468Ap0, vo468Ap1);
-
-      $if X86:
-        vo0 = wasm_v128_bitselect(vmin, vo0, wasm_f32x4_lt(vo0, vmin));
-        vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      $else:
-        vo0 = wasm_f32x4_max(vo0, vmin);
-        vo0 = wasm_f32x4_min(vo0, vmax);
-
-      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
-      if XNN_LIKELY(w_tmp >= 4) {
-        wasm_v128_store(output, vo0);
-        output += 4;
-      } else {
-        if (w_tmp & 2) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo0, 0);
-          output += 2;
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w_tmp & 1) {
-          *output = wasm_f32x4_extract_lane(vo0, 0);
-          output += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-    output_height -= 1;
-    padded_input_height -= 2;
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in b/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
new file mode 100644
index 0000000..72c77c0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
@@ -0,0 +1,422 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+$ARCH_SUFFIX = "_x86" if X86 else "_arm"
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd${ARCH_SUFFIX}_loadsplat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
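+  // The even/odd masks zero out input columns past the right edge in the final partial block; min/max are the output clamping bounds.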
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
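+  // "loadsplat" variant: the bias and all 25 kernel taps are pre-splatted into separate SIMD registers.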
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  $for M in range(3, 3 + 2 * ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  $if ROW_TILE > 1:
+    const size_t output_width = round_down_po2((input_width + (4 /* padding */ - 5 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    $for M in range(3, 3 + 2 * ROW_TILE):
+      if XNN_UNPREDICTABLE(padded_input_height < ${3 + M}) {
+        i${M} = zero;
+        $if M % 2 == 0 and M <= 2 * ROW_TILE + 1:
+          o${M // 2 - 1} = o${M // 2 - 2};
+      }
+
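+    // vi*x0246 / vi*x1357 hold the previous block's even/odd columns; they start at zero, providing the implicit left padding.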
+    $for M in range(3 + 2 * ROW_TILE):
+      v128_t vi${M}x0246 = vzero;
+
+    $for M in range(3 + 2 * ROW_TILE):
+      v128_t vi${M}x1357 = vzero;
+
+    $for M in range(3 + 2 * ROW_TILE):
+      const v128_t vi${M}x89AB = wasm_v128_load(i${M});
+      const v128_t vi${M}xCDEF = wasm_v128_load(i${M} + 4);
+      i${M} += 8;
+
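+    // De-interleave each row into even-indexed (x8ACE) and odd-indexed (x9BDF) columns, as required for the stride-2 convolution.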
+    $for M in range(3 + 2 * ROW_TILE):
+      v128_t vi${M}x8ACE = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 0, 2, 4, 6);
+      v128_t vi${M}x9BDF = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 1, 3, 5, 7);
+
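+    // Main loop: consume 8 input columns (producing 4 output columns) per row per iteration.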
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = vbias;
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, vk02);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, vk02));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, vk12);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, vk12));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, vk22);
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, vk22));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M+3}x8ACE, vk32);
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x8ACE, vk32));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 5:
+          vo${M}p5 = wasm_f32x4_mul(vi${2*M+4}x8ACE, vk42);
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x8ACE, vk42));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9BDF, vk03));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x9BDF, vk13));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x9BDF, vk23));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x9BDF, vk33));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x9BDF, vk43));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}x68AC = wasm_v32x4_shuffle(vi${M}x0246, vi${M}x8ACE, 3, 4, 5, 6);
+        vi${M}x0246 = vi${M}x8ACE;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x68AC, vk00));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x68AC, vk10));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x68AC, vk20));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x68AC, vk30));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x68AC, vk40));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}x79BD = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
+        vi${M}x1357 = vi${M}x9BDF;
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}xGHIJ = wasm_v128_load(i${M});
+        const v128_t vi${M}xKLMN = wasm_v128_load(i${M} + 4);
+        i${M} += 8;
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}xGIKM = wasm_v32x4_shuffle(vi${M}xGHIJ, vi${M}xKLMN, 0, 2, 4, 6);
+        const v128_t vi${M}xHJLN = wasm_v32x4_shuffle(vi${M}xGHIJ, vi${M}xKLMN, 1, 3, 5, 7);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x79BD, vk01));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x79BD, vk11));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x79BD, vk21));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x79BD, vk31));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x79BD, vk41));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}xACEG = wasm_v32x4_shuffle(vi${M}x8ACE, vi${M}xGIKM, 1, 2, 3, 4);
+        vi${M}x8ACE = vi${M}xGIKM;
+        vi${M}x9BDF = vi${M}xHJLN;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}xACEG, vk04));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}xACEG, vk14));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}xACEG, vk24));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}xACEG, vk34));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}xACEG, vk44));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
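+      // Clamp the accumulated results to [min, max]; the x86 variant uses bitselect instead of f32x4 min/max.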
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = vbias;
+
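+      // Mask off the columns that lie past the right edge of the row before accumulating.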
+      $for M in range(3 + 2 * ROW_TILE):
+        vi${M}x8ACE = wasm_v128_and(vmask_even, vi${M}x8ACE);
+
+      $for M in range(3 + 2 * ROW_TILE):
+        vi${M}x9BDF = wasm_v128_and(vmask_odd, vi${M}x9BDF);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, vk02);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, vk02));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, vk12);
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, vk12));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, vk22);
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, vk22));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M+3}x8ACE, vk32);
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x8ACE, vk32));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 5:
+          vo${M}p5 = wasm_f32x4_mul(vi${2*M+4}x8ACE, vk42);
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x8ACE, vk42));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9BDF, vk03));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x9BDF, vk13));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x9BDF, vk23));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x9BDF, vk33));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x9BDF, vk43));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}x68AC = wasm_v32x4_shuffle(vi${M}x0246, vi${M}x8ACE, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x68AC, vk00));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x68AC, vk10));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x68AC, vk20));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x68AC, vk30));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x68AC, vk40));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}x79BD = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x79BD, vk01));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x79BD, vk11));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x79BD, vk21));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x79BD, vk31));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x79BD, vk41));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}xACEG = wasm_v32x4_shuffle(vi${M}x8ACE, vzero, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}xACEG, vk04));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}xACEG, vk14));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}xACEG, vk24));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}xACEG, vk34));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}xACEG, vk44));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        $for M in reversed(range(ROW_TILE)):
+          wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+      } else {
+        if (w_tmp & 2) {
+          $for M in reversed(range(ROW_TILE)):
+            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0); o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          $for M in reversed(range(ROW_TILE)):
+            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0); o${M} += 1;
+        }
+      }
+    }
+
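+    // Re-base the row pointers: the bottom input rows of this block become the top rows of the next block of output rows.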
+    i0 = (const float*) ((uintptr_t) i${2 * ROW_TILE} - input_decrement);
+    i1 = (const float*) ((uintptr_t) i${2 * ROW_TILE + 1} - input_decrement);
+    i2 = (const float*) ((uintptr_t) i${2 * ROW_TILE + 2} - input_decrement);
+    $for M in range(3, 3 + 2 * ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+      padded_input_height = doz(padded_input_height, ${ROW_TILE * 2});
+    $else:
+      output_height -= 1;
+      padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in b/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
new file mode 100644
index 0000000..707e506
--- /dev/null
+++ b/src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
@@ -0,0 +1,402 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert ROW_TILE >= 1
+$assert ACCUMULATORS >= 1
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+$ARCH_SUFFIX = "_x86" if X86 else "_arm"
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd${ARCH_SUFFIX}_splat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
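+  // "splat" variant: the bias and 25 kernel taps stay packed in 7 vectors, and individual taps are splatted on demand with shuffles.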
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  $for M in range(3, 3 + 2 * ROW_TILE):
+    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+  $if ROW_TILE > 1:
+    const size_t output_width = round_down_po2((input_width + (4 /* padding */ - 5 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  $for M in range(1, ROW_TILE):
+    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    $for M in range(3, 3 + 2 * ROW_TILE):
+      if XNN_UNPREDICTABLE(padded_input_height < ${3 + M}) {
+        i${M} = zero;
+        $if M % 2 == 0 and M <= 2 * ROW_TILE + 1:
+          o${M // 2 - 1} = o${M // 2 - 2};
+      }
+
+    $for M in range(3 + 2 * ROW_TILE):
+      v128_t vi${M}x0246 = vzero;
+
+    $for M in range(3 + 2 * ROW_TILE):
+      v128_t vi${M}x1357 = vzero;
+
+    $for M in range(3 + 2 * ROW_TILE):
+      const v128_t vi${M}x89AB = wasm_v128_load(i${M});
+      const v128_t vi${M}xCDEF = wasm_v128_load(i${M} + 4);
+      i${M} += 8;
+
+    $for M in range(3 + 2 * ROW_TILE):
+      v128_t vi${M}x8ACE = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 0, 2, 4, 6);
+      v128_t vi${M}x9BDF = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 1, 3, 5, 7);
+
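+    // Main loop: consume 8 input columns (4 output columns) per row per iteration, splatting taps from the packed weight vectors as needed.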
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M+3}x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 5:
+          vo${M}p5 = wasm_f32x4_mul(vi${2*M+4}x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3));
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}x68AC = wasm_v32x4_shuffle(vi${M}x0246, vi${M}x8ACE, 3, 4, 5, 6);
+        vi${M}x0246 = vi${M}x8ACE;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}x79BD = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
+        vi${M}x1357 = vi${M}x9BDF;
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}xGHIJ = wasm_v128_load(i${M});
+        const v128_t vi${M}xKLMN = wasm_v128_load(i${M} + 4);
+        i${M} += 8;
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}xGIKM = wasm_v32x4_shuffle(vi${M}xGHIJ, vi${M}xKLMN, 0, 2, 4, 6);
+        const v128_t vi${M}xHJLN = wasm_v32x4_shuffle(vi${M}xGHIJ, vi${M}xKLMN, 1, 3, 5, 7);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}xACEG = wasm_v32x4_shuffle(vi${M}x8ACE, vi${M}xGIKM, 1, 2, 3, 4);
+        vi${M}x8ACE = vi${M}xGIKM;
+        vi${M}x9BDF = vi${M}xHJLN;
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      $for M in reversed(range(ROW_TILE)):
+        wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      $for M in range(ROW_TILE):
+        v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      $for M in range(3 + 2 * ROW_TILE):
+        vi${M}x8ACE = wasm_v128_and(vmask_even, vi${M}x8ACE);
+
+      $for M in range(3 + 2 * ROW_TILE):
+        vi${M}x9BDF = wasm_v128_and(vmask_odd, vi${M}x9BDF);
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 1:
+          v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 2:
+          v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+        $else:
+          vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 3:
+          v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+        $else:
+          vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 4:
+          v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M+3}x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+        $else:
+          vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        $if ACCUMULATORS > 5:
+          vo${M}p5 = wasm_f32x4_mul(vi${2*M+4}x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3));
+        $else:
+          vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}x68AC = wasm_v32x4_shuffle(vi${M}x0246, vi${M}x8ACE, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}x79BD = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      $for M in range(3 + 2 * ROW_TILE):
+        const v128_t vi${M}xACEG = wasm_v32x4_shuffle(vi${M}x8ACE, vzero, 1, 2, 3, 4);
+
+      $for M in range(ROW_TILE):
+        vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      $for M in range(ROW_TILE):
+        vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      $if ACCUMULATORS > 1:
+        $ACC_SLICE = 1
+        $while ACC_SLICE < ACCUMULATORS:
+          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
+            $if A + ACC_SLICE < ACCUMULATORS:
+              $for M in range(ROW_TILE):
+                vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE});
+          $ACC_SLICE *= 2
+
+      $if X86:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_v128_bitselect(vmin, vo${M}p0, wasm_f32x4_lt(vo${M}p0, vmin));
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_v128_bitselect(vo${M}, vmax, wasm_f32x4_le(vo${M}, vmax));
+      $else:
+        $for M in range(ROW_TILE):
+          v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin);
+        $for M in range(ROW_TILE):
+          vo${M} = wasm_f32x4_min(vo${M}, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        $for M in reversed(range(ROW_TILE)):
+          wasm_v128_store(o${M}, vo${M}); o${M} += 4;
+      } else {
+        if (w_tmp & 2) {
+          $for M in reversed(range(ROW_TILE)):
+            *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0); o${M} += 2;
+
+          $for M in range(ROW_TILE):
+            vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          $for M in reversed(range(ROW_TILE)):
+            *o${M} = wasm_f32x4_extract_lane(vo${M}, 0); o${M} += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i${2 * ROW_TILE} - input_decrement);
+    i1 = (const float*) ((uintptr_t) i${2 * ROW_TILE + 1} - input_decrement);
+    i2 = (const float*) ((uintptr_t) i${2 * ROW_TILE + 2} - input_decrement);
+    $for M in range(3, 3 + 2 * ROW_TILE):
+      i${M} = (const float*) ((uintptr_t) i${M-1} + input_width);
+
+    $if ROW_TILE > 1:
+      o0 = o${ROW_TILE - 1};
+      $for M in range(1, ROW_TILE):
+        o${M} = (float*) ((uintptr_t) o${M-1} + output_width);
+
+    $if ROW_TILE > 1:
+      output_height = doz(output_height, ${ROW_TILE});
+      padded_input_height = doz(padded_input_height, ${ROW_TILE * 2});
+    $else:
+      output_height -= 1;
+      padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc2.c
deleted file mode 100644
index b12af8e..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc2.c
+++ /dev/null
@@ -1,178 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  float* o0 = output;
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-
-  } while (--output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc3.c
deleted file mode 100644
index a300ea5..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc3.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  float* o0 = output;
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-
-  } while (--output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc4.c
deleted file mode 100644
index d53d68d..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4-acc4.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  float* o0 = output;
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-
-  } while (--output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4.c
deleted file mode 100644
index cb12f4b..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-1x4.c
+++ /dev/null
@@ -1,176 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  float* o0 = output;
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-
-  } while (--output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4-acc2.c
deleted file mode 100644
index 14627b0..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4-acc2.c
+++ /dev/null
@@ -1,233 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-
-    o0 = o1;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-
-    output_height = doz(output_height, 2);
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4.c
deleted file mode 100644
index 4b21fb5..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-2x4.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-
-    o0 = o1;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-
-    output_height = doz(output_height, 2);
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-3x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-3x4.c
deleted file mode 100644
index 2ca9f9d..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-3x4.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i4 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-      i4 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-
-      wasm_v128_store(o2, vo2);
-      o2 += 4;
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-    o0 = o2;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-
-    output_height = doz(output_height, 3);
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-4x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-4x4.c
deleted file mode 100644
index 4e401d7..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-4x4.c
+++ /dev/null
@@ -1,331 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-  float* o3 = (float*) ((uintptr_t) o2 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i4 = zero;
-      o3 = o2;
-    }
-    if XNN_UNPREDICTABLE(output_height < 5) {
-      i5 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5);
-    i5 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-      i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5);
-      i5 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
-      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-      vo3 = wasm_f32x4_min(vo3, vmax);
-
-      wasm_v128_store(o3, vo3);
-      o3 += 4;
-      wasm_v128_store(o2, vo2);
-      o2 += 4;
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
-      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-      vo3 = wasm_f32x4_min(vo3, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o3, vo3);
-        o3 += 4;
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
-          o3 += 2;
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o3 = wasm_f32x4_extract_lane(vo3, 0);
-          o3 += 1;
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-
-    o0 = o3;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-    o3 = (float*) ((uintptr_t) o2 + input_width);
-
-    output_height = doz(output_height, 4);
-  } while (output_height != 0);
-}
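Every kernel in this family finishes a row with the same masked remainder: after the input vectors are AND-ed with `vmask`, the last 1..3 valid columns are written as one 64-bit lane store, a lane rotation, and then a single 32-bit lane store. The scalar sketch below mirrors that pattern; the helper name and the `remaining` parameter (number of leftover columns, 1..3) are illustrative and not part of XNNPACK.

#include <stddef.h>

/* Scalar analogue of the partial-store tail used above: `v` holds up to four
 * already-clamped results for one output row, `remaining` is the number of
 * valid columns still to be written (1..3). Hypothetical helper name. */
static void store_row_tail(float* out, const float v[4], size_t remaining) {
  size_t lane = 0;
  if (remaining & 2) {
    /* Matches the wasm_f64x2_extract_lane store of lanes 0-1... */
    out[0] = v[lane + 0];
    out[1] = v[lane + 1];
    out += 2;
    lane += 2;  /* ...followed by the shuffle that rotates lanes 2,3 into 0,1. */
  }
  if (remaining & 1) {
    /* Matches the final wasm_f32x4_extract_lane store of lane 0. */
    out[0] = v[lane];
  }
}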
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-5x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-5x4.c
deleted file mode 100644
index 75898be..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-5x4.c
+++ /dev/null
@@ -1,382 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-  float* o3 = (float*) ((uintptr_t) o2 + input_width);
-  float* o4 = (float*) ((uintptr_t) o3 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i4 = zero;
-      o3 = o2;
-    }
-    if XNN_UNPREDICTABLE(output_height < 5) {
-      i5 = zero;
-      o4 = o3;
-    }
-    if XNN_UNPREDICTABLE(output_height < 6) {
-      i6 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-    v128_t vi6x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5);
-    i5 += 4;
-    v128_t vi6x4567 = wasm_v128_load(i6);
-    i6 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-      i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5);
-      i5 += 4;
-      const v128_t vi6x89AB = wasm_v128_load(i6);
-      i6 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
-      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
-      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-      vo3 = wasm_f32x4_min(vo3, vmax);
-      vo4 = wasm_f32x4_min(vo4, vmax);
-
-      wasm_v128_store(o4, vo4);
-      o4 += 4;
-      wasm_v128_store(o3, vo3);
-      o3 += 4;
-      wasm_v128_store(o2, vo2);
-      o2 += 4;
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
-      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
-      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-      vo3 = wasm_f32x4_min(vo3, vmax);
-      vo4 = wasm_f32x4_min(vo4, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o4, vo4);
-        o4 += 4;
-        wasm_v128_store(o3, vo3);
-        o3 += 4;
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
-          o4 += 2;
-          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
-          o3 += 2;
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
-          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o4 = wasm_f32x4_extract_lane(vo4, 0);
-          o4 += 1;
-          *o3 = wasm_f32x4_extract_lane(vo3, 0);
-          o3 += 1;
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-    i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-    o0 = o4;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-    o3 = (float*) ((uintptr_t) o2 + input_width);
-    o4 = (float*) ((uintptr_t) o3 + input_width);
-
-    output_height = doz(output_height, 5);
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-6x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-6x4.c
deleted file mode 100644
index b452f18..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-6x4.c
+++ /dev/null
@@ -1,433 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
-  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-  float* o3 = (float*) ((uintptr_t) o2 + input_width);
-  float* o4 = (float*) ((uintptr_t) o3 + input_width);
-  float* o5 = (float*) ((uintptr_t) o4 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i4 = zero;
-      o3 = o2;
-    }
-    if XNN_UNPREDICTABLE(output_height < 5) {
-      i5 = zero;
-      o4 = o3;
-    }
-    if XNN_UNPREDICTABLE(output_height < 6) {
-      i6 = zero;
-      o5 = o4;
-    }
-    if XNN_UNPREDICTABLE(output_height < 7) {
-      i7 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-    v128_t vi6x0123 = vzero;
-    v128_t vi7x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5);
-    i5 += 4;
-    v128_t vi6x4567 = wasm_v128_load(i6);
-    i6 += 4;
-    v128_t vi7x4567 = wasm_v128_load(i7);
-    i7 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-      i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5);
-      i5 += 4;
-      const v128_t vi6x89AB = wasm_v128_load(i6);
-      i6 += 4;
-      const v128_t vi7x89AB = wasm_v128_load(i7);
-      i7 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
-      v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-      vi7x0123 = vi7x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-      vi7x4567 = vi7x89AB;
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
-      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
-      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
-      v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-      vo3 = wasm_f32x4_min(vo3, vmax);
-      vo4 = wasm_f32x4_min(vo4, vmax);
-      vo5 = wasm_f32x4_min(vo5, vmax);
-
-      wasm_v128_store(o5, vo5);
-      o5 += 4;
-      wasm_v128_store(o4, vo4);
-      o4 += 4;
-      wasm_v128_store(o3, vo3);
-      o3 += 4;
-      wasm_v128_store(o2, vo2);
-      o2 += 4;
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
-      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
-      v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
-
-
-      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
-      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
-      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
-      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
-      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
-      v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-      vo3 = wasm_f32x4_min(vo3, vmax);
-      vo4 = wasm_f32x4_min(vo4, vmax);
-      vo5 = wasm_f32x4_min(vo5, vmax);
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o5, vo5);
-        o5 += 4;
-        wasm_v128_store(o4, vo4);
-        o4 += 4;
-        wasm_v128_store(o3, vo3);
-        o3 += 4;
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o5) = wasm_f64x2_extract_lane(vo5, 0);
-          o5 += 2;
-          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
-          o4 += 2;
-          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
-          o3 += 2;
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
-          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
-          vo5 = wasm_v32x4_shuffle(vo5, vo5, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o5 = wasm_f32x4_extract_lane(vo5, 0);
-          o5 += 1;
-          *o4 = wasm_f32x4_extract_lane(vo4, 0);
-          o4 += 1;
-          *o3 = wasm_f32x4_extract_lane(vo3, 0);
-          o3 += 1;
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-    i6 = (const float*) ((uintptr_t) i5 + input_width);
-    i7 = (const float*) ((uintptr_t) i6 + input_width);
-
-    o0 = o5;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-    o3 = (float*) ((uintptr_t) o2 + input_width);
-    o4 = (float*) ((uintptr_t) o3 + input_width);
-    o5 = (float*) ((uintptr_t) o4 + input_width);
-
-    output_height = doz(output_height, 6);
-  } while (output_height != 0);
-}
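The deleted multi-row variants (4x4, 5x4, 6x4) all rotate their row pointers the same way between passes: an N-output-row pass reads N+2 input rows, and the next pass reuses the last two of them as its top rows, rewound by `input_decrement` (the bytes consumed while sweeping a row, i.e. `input_width` rounded up to 16 bytes). A standalone sketch of that bookkeeping, with hypothetical `i[]`/`o[]` pointer arrays and widths in bytes as in the kernels above:

#include <stddef.h>
#include <stdint.h>

/* Sketch of the per-pass pointer rotation for an n_out-row variant.
 * i[0..n_out+1] are the input row pointers, o[0..n_out-1] the output row
 * pointers; input_width and input_decrement are in bytes. */
static void rotate_row_pointers(const float** i, float** o, size_t n_out,
                                size_t input_width, size_t input_decrement) {
  /* The next pass starts n_out rows lower: its top two rows are the rows the
   * finished pass read as i[n_out] and i[n_out + 1], rewound to their start. */
  i[0] = (const float*) ((uintptr_t) i[n_out] - input_decrement);
  i[1] = (const float*) ((uintptr_t) i[n_out + 1] - input_decrement);
  for (size_t k = 2; k < n_out + 2; k++) {
    i[k] = (const float*) ((uintptr_t) i[k - 1] + input_width);
  }
  /* o[n_out - 1] has already advanced past its row, i.e. to the start of the
   * next pass's first output row. */
  o[0] = o[n_out - 1];
  for (size_t k = 1; k < n_out; k++) {
    o[k] = (float*) ((uintptr_t) o[k - 1] + input_width);
  }
}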
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
new file mode 100644
index 0000000..d253616
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
@@ -0,0 +1,181 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
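The new `loadsplat` variants compute the same result as the deleted kernels; only the weight setup changes (two full vector loads plus a 64-bit splat and lane shuffles instead of ten single-element load-splats), and the acc2/acc3 suffixes indicate how many partial accumulators are summed at the end. For reference, a plain scalar version of what all of these 3x3, stride-1, pad-1 CHW kernels compute, assuming widths in elements rather than bytes; the function and parameter names are illustrative only.

#include <stddef.h>

/* Scalar reference for the 3x3 stride-1 depthwise convolution with 1-pixel
 * zero padding and min/max clamping. weights[0] is the bias, weights[1..9]
 * the 3x3 kernel in row-major order, matching the layout read above. */
static void dwconv2d_chw_3x3p1_reference(
    size_t height, size_t width,
    const float* input,   /* height x width, one channel */
    const float* weights, /* 10 floats: bias, k00..k22 */
    float* output,        /* height x width */
    float output_min, float output_max) {
  for (size_t oy = 0; oy < height; oy++) {
    for (size_t ox = 0; ox < width; ox++) {
      float acc = weights[0];
      for (int ky = 0; ky < 3; ky++) {
        for (int kx = 0; kx < 3; kx++) {
          const long iy = (long) oy + ky - 1;
          const long ix = (long) ox + kx - 1;
          if (iy >= 0 && iy < (long) height && ix >= 0 && ix < (long) width) {
            acc += input[(size_t) iy * width + (size_t) ix] * weights[1 + ky * 3 + kx];
          }
        }
      }
      acc = acc < output_min ? output_min : acc;
      acc = acc > output_max ? output_max : acc;
      output[oy * width + ox] = acc;
    }
  }
}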
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
new file mode 100644
index 0000000..0e32cf0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
@@ -0,0 +1,183 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
new file mode 100644
index 0000000..ef20d0f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
@@ -0,0 +1,185 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4.c
new file mode 100644
index 0000000..2a5883e
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-1x4.c
@@ -0,0 +1,179 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
new file mode 100644
index 0000000..374b5ea
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
@@ -0,0 +1,236 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4.c
new file mode 100644
index 0000000..ba53f39
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-2x4.c
@@ -0,0 +1,232 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-3x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-3x4.c
new file mode 100644
index 0000000..770dba9
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-3x4.c
@@ -0,0 +1,283 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4);
+    i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      i4 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2);
+      o2 += 4;
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o2, vo2);
+        o2 += 4;
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
+          o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0);
+          o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-4x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-4x4.c
new file mode 100644
index 0000000..f544761
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-4x4.c
@@ -0,0 +1,334 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4);
+    i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5);
+    i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      i5 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3);
+      o3 += 4;
+      wasm_v128_store(o2, vo2);
+      o2 += 4;
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o3, vo3);
+        o3 += 4;
+        wasm_v128_store(o2, vo2);
+        o2 += 4;
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
+          o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
+          o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0);
+          o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0);
+          o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-5x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-5x4.c
new file mode 100644
index 0000000..b0cdabf
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-5x4.c
@@ -0,0 +1,385 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4);
+    i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5);
+    i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6);
+    i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      i6 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+      vi6x0123 = vi6x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      wasm_v128_store(o4, vo4);
+      o4 += 4;
+      wasm_v128_store(o3, vo3);
+      o3 += 4;
+      wasm_v128_store(o2, vo2);
+      o2 += 4;
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o4, vo4);
+        o4 += 4;
+        wasm_v128_store(o3, vo3);
+        o3 += 4;
+        wasm_v128_store(o2, vo2);
+        o2 += 4;
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
+          o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
+          o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
+          o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o4 = wasm_f32x4_extract_lane(vo4, 0);
+          o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0);
+          o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0);
+          o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-6x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-6x4.c
new file mode 100644
index 0000000..2c377ea
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-loadsplat-6x4.c
@@ -0,0 +1,436 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+  float* o5 = (float*) ((uintptr_t) o4 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+      o5 = o4;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4);
+    i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5);
+    i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6);
+    i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7);
+    i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      i7 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
+      v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+      vi6x0123 = vi6x4567;
+      vi7x0123 = vi7x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+      vi7x4567 = vi7x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+      vo5 = wasm_f32x4_min(vo5, vmax);
+
+      wasm_v128_store(o5, vo5);
+      o5 += 4;
+      wasm_v128_store(o4, vo4);
+      o4 += 4;
+      wasm_v128_store(o3, vo3);
+      o3 += 4;
+      wasm_v128_store(o2, vo2);
+      o2 += 4;
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
+      v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+      vo5 = wasm_f32x4_min(vo5, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o5, vo5);
+        o5 += 4;
+        wasm_v128_store(o4, vo4);
+        o4 += 4;
+        wasm_v128_store(o3, vo3);
+        o3 += 4;
+        wasm_v128_store(o2, vo2);
+        o2 += 4;
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o5) = wasm_f64x2_extract_lane(vo5, 0);
+          o5 += 2;
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
+          o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
+          o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
+          o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+          vo5 = wasm_v32x4_shuffle(vo5, vo5, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o5 = wasm_f32x4_extract_lane(vo5, 0);
+          o5 += 1;
+          *o4 = wasm_f32x4_extract_lane(vo4, 0);
+          o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0);
+          o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0);
+          o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o5;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+    o5 = (float*) ((uintptr_t) o4 + input_width);
+
+    output_height = doz(output_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
new file mode 100644
index 0000000..611ee44
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
@@ -0,0 +1,176 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc3.c
new file mode 100644
index 0000000..d1a2a5a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc3.c
@@ -0,0 +1,178 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
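+      // Zero the lanes past the end of the row so the partial final block of
+      // 1..4 pixels contributes nothing beyond the valid width.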
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
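+    // Advance to the next output row: each row pointer moved forward by the
+    // padded row stride (input_decrement bytes) during the pass, so rewinding
+    // by that amount makes the old i1/i2 the new i0/i1.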
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc4.c
new file mode 100644
index 0000000..c1a765b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4-acc4.c
@@ -0,0 +1,180 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4.c
new file mode 100644
index 0000000..b0108f7
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-1x4.c
@@ -0,0 +1,174 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4-acc2.c
new file mode 100644
index 0000000..d95a3a0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4-acc2.c
@@ -0,0 +1,227 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
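+    // Two output rows were produced per pass: rebase the row pointers two
+    // rows down and reduce the remaining row count by 2, saturating at zero
+    // via doz().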
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4.c
new file mode 100644
index 0000000..3c46bb4
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-2x4.c
@@ -0,0 +1,223 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-3x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-3x4.c
new file mode 100644
index 0000000..4459ff3
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-3x4.c
@@ -0,0 +1,270 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-4x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-4x4.c
new file mode 100644
index 0000000..cddf8ad
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-4x4.c
@@ -0,0 +1,317 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
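+      // The tail reuses the already-loaded x4567 block; vmask zeroes the lanes past
+      // the last valid pixel so out-of-bounds columns contribute nothing to the sums.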
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
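+      // Store a full 4-pixel block only when exactly 4 pixels remain; otherwise
+      // write the remaining 1..3 pixels as a 2-float pair and/or a single float,
+      // rotating the stored lanes out of the vector in between.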
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
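+    // Advance to the next group of 4 output rows: the old i4/i5 rows, rewound to
+    // their row starts, become the new i0/i1, and doz() saturates the remaining
+    // output-row count at zero.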
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-5x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-5x4.c
new file mode 100644
index 0000000..216dce6
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-5x4.c
@@ -0,0 +1,364 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
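+  // Same 3x3, padding-1 kernel as the 4x4 variant above, but each outer iteration
+  // reads 7 input rows (i0..i6) and produces 5 output rows (o0..o4).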
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+      vi6x0123 = vi6x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o4, vo4); o4 += 4;
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-6x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-6x4.c
new file mode 100644
index 0000000..7bd56f2
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-arm-splat-6x4.c
@@ -0,0 +1,411 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
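+  // 6-output-row variant of the same 3x3, padding-1 kernel: each outer iteration
+  // reads 8 input rows (i0..i7) and writes 6 output rows (o0..o5).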
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+  float* o5 = (float*) ((uintptr_t) o4 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+      o5 = o4;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+      vi6x0123 = vi6x4567;
+      vi7x0123 = vi7x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+      vi7x4567 = vi7x89AB;
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+      vo5 = wasm_f32x4_min(vo5, vmax);
+
+      wasm_v128_store(o5, vo5); o5 += 4;
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+      vo5 = wasm_f32x4_min(vo5, vmax);
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o5, vo5); o5 += 4;
+        wasm_v128_store(o4, vo4); o4 += 4;
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o5) = wasm_f64x2_extract_lane(vo5, 0); o5 += 2;
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+          vo5 = wasm_v32x4_shuffle(vo5, vo5, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o5 = wasm_f32x4_extract_lane(vo5, 0); o5 += 1;
+          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o5;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+    o5 = (float*) ((uintptr_t) o4 + input_width);
+
+    output_height = doz(output_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc2.c
deleted file mode 100644
index fe2f43a..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc2.c
+++ /dev/null
@@ -1,178 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  float* o0 = output;
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-
-  } while (--output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc3.c
deleted file mode 100644
index c0c153c..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc3.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  float* o0 = output;
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-
-  } while (--output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc4.c
deleted file mode 100644
index b131a5d..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4-acc4.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  float* o0 = output;
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-
-      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-
-  } while (--output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4.c
deleted file mode 100644
index bdf340e..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-1x4.c
+++ /dev/null
@@ -1,176 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  float* o0 = output;
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-
-  } while (--output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4-acc2.c
deleted file mode 100644
index 5ec1d40..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4-acc2.c
+++ /dev/null
@@ -1,233 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
-      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-
-      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
-      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-
-    o0 = o1;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-
-    output_height = doz(output_height, 2);
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4.c
deleted file mode 100644
index bbc569a..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-2x4.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-
-    o0 = o1;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-
-    output_height = doz(output_height, 2);
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-3x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-3x4.c
deleted file mode 100644
index ea73bde..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-3x4.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i4 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-      i4 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-
-      wasm_v128_store(o2, vo2);
-      o2 += 4;
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-    o0 = o2;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-
-    output_height = doz(output_height, 3);
-  } while (output_height != 0);
-}
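
(Reviewer note, not part of the patch.) The min/max clamp in the wasmsimd-x86 kernels deleted above is written with explicit compares plus wasm_v128_bitselect rather than wasm_f32x4_min/wasm_f32x4_max, presumably because the NaN-propagating WebAssembly min/max lower poorly on x86. A minimal standalone sketch of that clamp, assuming an Emscripten-style toolchain (emcc -msimd128); the helper name clamp_f32x4 and the test values are illustrative only:

#include <stdio.h>
#include <wasm_simd128.h>

static v128_t clamp_f32x4(v128_t x, v128_t vmin, v128_t vmax) {
  // max(x, vmin): take vmin in lanes where x < vmin, otherwise keep x.
  v128_t y = wasm_v128_bitselect(vmin, x, wasm_f32x4_lt(x, vmin));
  // min(y, vmax): keep y in lanes where y <= vmax, otherwise take vmax.
  return wasm_v128_bitselect(y, vmax, wasm_f32x4_le(y, vmax));
}

int main(void) {
  const v128_t x  = wasm_f32x4_make(-2.0f, 0.5f, 3.0f, 7.0f);
  const v128_t lo = wasm_f32x4_splat(0.0f);
  const v128_t hi = wasm_f32x4_splat(6.0f);
  const v128_t y  = clamp_f32x4(x, lo, hi);
  printf("%g %g %g %g\n",
         wasm_f32x4_extract_lane(y, 0), wasm_f32x4_extract_lane(y, 1),
         wasm_f32x4_extract_lane(y, 2), wasm_f32x4_extract_lane(y, 3));
  return 0;  // expected output: 0 0.5 3 6
}
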
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-4x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-4x4.c
deleted file mode 100644
index 739d69c..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-4x4.c
+++ /dev/null
@@ -1,331 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-  float* o3 = (float*) ((uintptr_t) o2 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i4 = zero;
-      o3 = o2;
-    }
-    if XNN_UNPREDICTABLE(output_height < 5) {
-      i5 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5);
-    i5 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-      i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5);
-      i5 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
-      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
-
-      wasm_v128_store(o3, vo3);
-      o3 += 4;
-      wasm_v128_store(o2, vo2);
-      o2 += 4;
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
-      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o3, vo3);
-        o3 += 4;
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
-          o3 += 2;
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o3 = wasm_f32x4_extract_lane(vo3, 0);
-          o3 += 1;
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-
-    o0 = o3;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-    o3 = (float*) ((uintptr_t) o2 + input_width);
-
-    output_height = doz(output_height, 4);
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-5x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-5x4.c
deleted file mode 100644
index 77f21f4..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-5x4.c
+++ /dev/null
@@ -1,382 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-  float* o3 = (float*) ((uintptr_t) o2 + input_width);
-  float* o4 = (float*) ((uintptr_t) o3 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i4 = zero;
-      o3 = o2;
-    }
-    if XNN_UNPREDICTABLE(output_height < 5) {
-      i5 = zero;
-      o4 = o3;
-    }
-    if XNN_UNPREDICTABLE(output_height < 6) {
-      i6 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-    v128_t vi6x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5);
-    i5 += 4;
-    v128_t vi6x4567 = wasm_v128_load(i6);
-    i6 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-      i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5);
-      i5 += 4;
-      const v128_t vi6x89AB = wasm_v128_load(i6);
-      i6 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
-      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
-      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
-      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
-
-      wasm_v128_store(o4, vo4);
-      o4 += 4;
-      wasm_v128_store(o3, vo3);
-      o3 += 4;
-      wasm_v128_store(o2, vo2);
-      o2 += 4;
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
-      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
-      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
-      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o4, vo4);
-        o4 += 4;
-        wasm_v128_store(o3, vo3);
-        o3 += 4;
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
-          o4 += 2;
-          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
-          o3 += 2;
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
-          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o4 = wasm_f32x4_extract_lane(vo4, 0);
-          o4 += 1;
-          *o3 = wasm_f32x4_extract_lane(vo3, 0);
-          o3 += 1;
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-    i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-    o0 = o4;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-    o3 = (float*) ((uintptr_t) o2 + input_width);
-    o4 = (float*) ((uintptr_t) o3 + input_width);
-
-    output_height = doz(output_height, 5);
-  } while (output_height != 0);
-}
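
(Reviewer note, not part of the patch.) The vi?x3456 / vi?x5678 vectors in these kernels are the left- and right-shifted tap windows for a 3-wide filter over four output pixels, assembled by shuffling adjacent 4-pixel blocks of the same row (or a zero block in the tail). A hedged standalone sketch of just that window construction, with illustrative input values and the same older intrinsic names used by the generated code:

#include <stdio.h>
#include <wasm_simd128.h>

int main(void) {
  // Three consecutive 4-pixel blocks of one input row.
  const v128_t x0123 = wasm_f32x4_make(0.0f, 1.0f, 2.0f, 3.0f);
  const v128_t x4567 = wasm_f32x4_make(4.0f, 5.0f, 6.0f, 7.0f);
  const v128_t x89AB = wasm_f32x4_make(8.0f, 9.0f, 10.0f, 11.0f);

  // Shuffle indices 0..3 select lanes from the first operand, 4..7 from the second.
  const v128_t x3456 = wasm_v32x4_shuffle(x0123, x4567, 3, 4, 5, 6);  // left taps
  const v128_t x5678 = wasm_v32x4_shuffle(x4567, x89AB, 1, 2, 3, 4);  // right taps

  printf("x3456 = %g %g %g %g\n",
         wasm_f32x4_extract_lane(x3456, 0), wasm_f32x4_extract_lane(x3456, 1),
         wasm_f32x4_extract_lane(x3456, 2), wasm_f32x4_extract_lane(x3456, 3));
  printf("x5678 = %g %g %g %g\n",
         wasm_f32x4_extract_lane(x5678, 0), wasm_f32x4_extract_lane(x5678, 1),
         wasm_f32x4_extract_lane(x5678, 2), wasm_f32x4_extract_lane(x5678, 3));
  return 0;  // expected: 3 4 5 6 and 5 6 7 8
}
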
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-6x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-6x4.c
deleted file mode 100644
index 4905d09..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-6x4.c
+++ /dev/null
@@ -1,433 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 1);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = input;
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
-  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-  float* o3 = (float*) ((uintptr_t) o2 + input_width);
-  float* o4 = (float*) ((uintptr_t) o3 + input_width);
-  float* o5 = (float*) ((uintptr_t) o4 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i2 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height < 3) {
-      i3 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i4 = zero;
-      o3 = o2;
-    }
-    if XNN_UNPREDICTABLE(output_height < 5) {
-      i5 = zero;
-      o4 = o3;
-    }
-    if XNN_UNPREDICTABLE(output_height < 6) {
-      i6 = zero;
-      o5 = o4;
-    }
-    if XNN_UNPREDICTABLE(output_height < 7) {
-      i7 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-    v128_t vi6x0123 = vzero;
-    v128_t vi7x0123 = vzero;
-
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5);
-    i5 += 4;
-    v128_t vi6x4567 = wasm_v128_load(i6);
-    i6 += 4;
-    v128_t vi7x4567 = wasm_v128_load(i7);
-    i7 += 4;
-
-    size_t w = input_width;
-    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-      i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5);
-      i5 += 4;
-      const v128_t vi6x89AB = wasm_v128_load(i6);
-      i6 += 4;
-      const v128_t vi7x89AB = wasm_v128_load(i7);
-      i7 += 4;
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
-      v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-      vi7x0123 = vi7x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-      vi7x4567 = vi7x89AB;
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
-      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
-      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
-      v128_t vo5 = wasm_v128_bitselect(vmin, vo5p0, wasm_f32x4_lt(vo5p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
-      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
-      vo5 = wasm_v128_bitselect(vo5, vmax, wasm_f32x4_le(vo5, vmax));
-
-      wasm_v128_store(o5, vo5);
-      o5 += 4;
-      wasm_v128_store(o4, vo4);
-      o4 += 4;
-      wasm_v128_store(o3, vo3);
-      o3 += 4;
-      wasm_v128_store(o2, vo2);
-      o2 += 4;
-      wasm_v128_store(o1, vo1);
-      o1 += 4;
-      wasm_v128_store(o0, vo0);
-      o0 += 4;
-    }
-    // Always process the last block of 1..4 pixels.
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
-      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
-
-      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
-      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
-      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
-      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
-      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
-      v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
-
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
-      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
-      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
-      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
-      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
-      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
-      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
-
-
-      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
-      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
-      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
-      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
-      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
-      v128_t vo5 = wasm_v128_bitselect(vmin, vo5p0, wasm_f32x4_lt(vo5p0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
-      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
-      vo5 = wasm_v128_bitselect(vo5, vmax, wasm_f32x4_le(vo5, vmax));
-
-      if XNN_LIKELY(w == 4 * sizeof(float)) {
-        wasm_v128_store(o5, vo5);
-        o5 += 4;
-        wasm_v128_store(o4, vo4);
-        o4 += 4;
-        wasm_v128_store(o3, vo3);
-        o3 += 4;
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o5) = wasm_f64x2_extract_lane(vo5, 0);
-          o5 += 2;
-          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
-          o4 += 2;
-          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
-          o3 += 2;
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
-          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
-          vo5 = wasm_v32x4_shuffle(vo5, vo5, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o5 = wasm_f32x4_extract_lane(vo5, 0);
-          o5 += 1;
-          *o4 = wasm_f32x4_extract_lane(vo4, 0);
-          o4 += 1;
-          *o3 = wasm_f32x4_extract_lane(vo3, 0);
-          o3 += 1;
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-    i6 = (const float*) ((uintptr_t) i5 + input_width);
-    i7 = (const float*) ((uintptr_t) i6 + input_width);
-
-    o0 = o5;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-    o3 = (float*) ((uintptr_t) o2 + input_width);
-    o4 = (float*) ((uintptr_t) o3 + input_width);
-    o5 = (float*) ((uintptr_t) o4 + input_width);
-
-    output_height = doz(output_height, 6);
-  } while (output_height != 0);
-}
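
(Reviewer note, not part of the patch.) The deleted files above broadcast each of the ten filter values with a separate wasm_v32x4_load_splat, while the replacement loadsplat file that begins below loads the coefficients as two full vectors plus one 64-bit splat and broadcasts each coefficient with a shuffle. A hedged sketch contrasting the two schemes, using the same older intrinsic names as the generated code; the example coefficient values are illustrative only:

#include <stdio.h>
#include <wasm_simd128.h>

int main(void) {
  // Ten packed coefficients: bias followed by the 3x3 kernel, as in `weights` above.
  const float weights[10] = {0.5f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};

  // Deleted scheme: one broadcast load per coefficient (ten loads in total).
  const v128_t vk00_old = wasm_v32x4_load_splat(weights + 1);

  // Scheme of the new file below: two vector loads plus a 64-bit splat,
  // then per-coefficient broadcast via shuffles.
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw89   = wasm_v64x2_load_splat(weights + 8);
  const v128_t vk00_new = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk22_new = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

  printf("k00: old=%g new=%g, k22=%g\n",
         wasm_f32x4_extract_lane(vk00_old, 0),
         wasm_f32x4_extract_lane(vk00_new, 0),
         wasm_f32x4_extract_lane(vk22_new, 0));
  return 0;  // expected: k00: old=1 new=1, k22=9
}
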
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
new file mode 100644
index 0000000..66bfd4a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
@@ -0,0 +1,181 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
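
For reference, the kernels added in this change all implement the same computation: a 3x3, stride-1, padding-1 depthwise convolution over a single channel in CHW layout, followed by a clamp to [min, max] (done with compare + wasm_v128_bitselect rather than f32x4 min/max, presumably because that lowers better on x86 targets). The scalar sketch below is only an illustration of what the generated SIMD code vectorizes; the helper name dwconv2d_3x3p1_ref is hypothetical and not part of this change, and sizes here are in elements whereas the kernels take input_width in bytes.

#include <stddef.h>

// Scalar reference: 3x3 depthwise convolution, stride 1, zero padding of 1,
// single channel in CHW layout, with min/max clamping. Illustration only.
static void dwconv2d_3x3p1_ref(
    size_t height, size_t width,   // sizes in elements
    const float* input,            // height x width
    const float* weights,          // weights[0] = bias, weights[1..9] = 3x3 taps, row-major
    float* output,                 // height x width
    float output_min, float output_max)
{
  for (size_t y = 0; y < height; y++) {
    for (size_t x = 0; x < width; x++) {
      float acc = weights[0];                           // bias
      for (int ky = 0; ky < 3; ky++) {
        for (int kx = 0; kx < 3; kx++) {
          const ptrdiff_t iy = (ptrdiff_t) y + ky - 1;  // padding of 1 on every side
          const ptrdiff_t ix = (ptrdiff_t) x + kx - 1;
          if (iy >= 0 && iy < (ptrdiff_t) height && ix >= 0 && ix < (ptrdiff_t) width) {
            acc += input[(size_t) iy * width + (size_t) ix] * weights[1 + ky * 3 + kx];
          }
        }
      }
      // Same clamp order as the generated code: min first, then max.
      if (acc < output_min) acc = output_min;
      if (acc > output_max) acc = output_max;
      output[y * width + x] = acc;
    }
  }
}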
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
new file mode 100644
index 0000000..0bae763
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
@@ -0,0 +1,183 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
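
The only difference between the -acc2, -acc3, and -acc4 variants is how many independent partial accumulators the nine multiply-adds are spread across before the final reduction into vo0p0; splitting the sum shortens the serial dependency chain between additions. A scalar sketch of the acc2 pattern (hypothetical helper, illustration only); acc3/acc4 simply use three or four partial sums:

// Accumulate nine products into two independent partial sums, then reduce.
static inline float dot9_acc2(const float a[9], const float b[9], float bias) {
  float p0 = bias;   // partial sum 0, seeded with the bias like vo0p0
  float p1 = 0.0f;   // partial sum 1, like vo0p1
  for (int i = 0; i + 1 < 9; i += 2) {
    p0 += a[i] * b[i];
    p1 += a[i + 1] * b[i + 1];
  }
  p0 += a[8] * b[8];
  return p0 + p1;    // single reduction at the end, as in vo0p0 = add(vo0p0, vo0p1)
}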
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
new file mode 100644
index 0000000..40e229a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
@@ -0,0 +1,185 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4.c
new file mode 100644
index 0000000..484916c
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-1x4.c
@@ -0,0 +1,179 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
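
In each of these kernels the inner loop keeps three 4-wide blocks of every input row (the previous block x0123, the current block x4567, and the freshly loaded x89AB), and builds the left- and right-shifted neighbours needed by the 3-tap filter with wasm_v32x4_shuffle instead of unaligned loads. A scalar sketch of which lanes those two shuffles pick (placeholder name, illustration only):

// prev holds elements 0..3 of a row, curr holds 4..7, next holds 8..11.
static void make_windows(const float prev[4], const float curr[4], const float next[4],
                         float left[4], float right[4]) {
  for (int i = 0; i < 4; i++) {
    left[i]  = (i == 0) ? prev[3] : curr[i - 1];  // lanes 3,4,5,6 -> the x3456 window
    right[i] = (i == 3) ? next[0] : curr[i + 1];  // lanes 5,6,7,8 -> the x5678 window
  }
}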
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
new file mode 100644
index 0000000..4460a21
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
@@ -0,0 +1,236 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
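
At the bottom of the multi-row variants, the pointer bookkeeping rebases the first input rows on the last rows just consumed, minus input_decrement (input_width rounded up to a whole number of 4-float blocks, which is exactly how far each row pointer advanced during the pass), advances the output pointers, and steps output_height with doz(), a saturating "difference or zero". A minimal sketch of that bookkeeping for a two-rows-per-pass kernel; next_tile is a hypothetical helper, not part of this change:

#include <stddef.h>
#include <stdint.h>

// doz(): max(a - b, 0), so the final, possibly short, tile cannot underflow.
static inline size_t doz(size_t a, size_t b) {
  return a >= b ? a - b : 0;
}

// End-of-tile pointer rotation for a two-rows-per-pass kernel (sketch only;
// input_width and input_decrement are in bytes).
static void next_tile(const float** i0, const float** i1, const float** i2, const float** i3,
                      float** o0, float** o1,
                      size_t input_width, size_t input_decrement, size_t* output_height) {
  *i0 = (const float*) ((uintptr_t) *i2 - input_decrement);  // reuse the last rows read
  *i1 = (const float*) ((uintptr_t) *i3 - input_decrement);
  *i2 = (const float*) ((uintptr_t) *i1 + input_width);
  *i3 = (const float*) ((uintptr_t) *i2 + input_width);

  *o0 = *o1;                                       // the next tile starts below row o1
  *o1 = (float*) ((uintptr_t) *o0 + input_width);  // output rows are input_width bytes apart

  *output_height = doz(*output_height, 2);
}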
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4.c
new file mode 100644
index 0000000..a31d497
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-2x4.c
@@ -0,0 +1,232 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
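
The tail block of every row masks the already-loaded 4-wide vectors with vmask so lanes past the end of the row contribute zero, then splits the store by the low bits of the remaining byte count w: a 2-float piece when w & (2 * sizeof(float)) is set, followed by a 1-float piece when w & (1 * sizeof(float)) is set, rotating the vector in between. A scalar sketch of that split store (placeholder name, illustration only); a full block of 4 floats is stored directly with wasm_v128_store:

#include <stddef.h>
#include <string.h>

// Store the remaining 1..3 floats of a row. w is the remaining byte count,
// so w & 8 selects the 2-float piece and w & 4 the 1-float piece.
static void store_tail(float* o, const float v[4], size_t w) {
  size_t lane = 0;
  if (w & (2 * sizeof(float))) {
    memcpy(o, &v[lane], 2 * sizeof(float));  // like the f64x2 lane-0 store
    o += 2;
    lane += 2;                               // like the 2,3,0,1 lane rotation
  }
  if (w & (1 * sizeof(float))) {
    *o = v[lane];                            // like the f32x4 lane-0 store
  }
}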
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-3x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-3x4.c
new file mode 100644
index 0000000..919d7db
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-3x4.c
@@ -0,0 +1,283 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4);
+    i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      i4 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2);
+      o2 += 4;
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o2, vo2);
+        o2 += 4;
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
+          o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0);
+          o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-4x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-4x4.c
new file mode 100644
index 0000000..313217d
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-4x4.c
@@ -0,0 +1,334 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4);
+    i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5);
+    i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      i5 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3);
+      o3 += 4;
+      wasm_v128_store(o2, vo2);
+      o2 += 4;
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o3, vo3);
+        o3 += 4;
+        wasm_v128_store(o2, vo2);
+        o2 += 4;
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
+          o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
+          o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0);
+          o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0);
+          o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-5x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-5x4.c
new file mode 100644
index 0000000..bdd6d74
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-5x4.c
@@ -0,0 +1,385 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4);
+    i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5);
+    i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6);
+    i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      i6 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+      vi6x0123 = vi6x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      wasm_v128_store(o4, vo4);
+      o4 += 4;
+      wasm_v128_store(o3, vo3);
+      o3 += 4;
+      wasm_v128_store(o2, vo2);
+      o2 += 4;
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o4, vo4);
+        o4 += 4;
+        wasm_v128_store(o3, vo3);
+        o3 += 4;
+        wasm_v128_store(o2, vo2);
+        o2 += 4;
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
+          o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
+          o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
+          o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o4 = wasm_f32x4_extract_lane(vo4, 0);
+          o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0);
+          o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0);
+          o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-6x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-6x4.c
new file mode 100644
index 0000000..b687128
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-loadsplat-6x4.c
@@ -0,0 +1,436 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+  float* o5 = (float*) ((uintptr_t) o4 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+      o5 = o4;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0);
+    i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1);
+    i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2);
+    i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3);
+    i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4);
+    i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5);
+    i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6);
+    i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7);
+    i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      i7 += 4;
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
+      v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+      vi6x0123 = vi6x4567;
+      vi7x0123 = vi7x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+      vi7x4567 = vi7x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      v128_t vo5 = wasm_v128_bitselect(vmin, vo5p0, wasm_f32x4_lt(vo5p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+      vo5 = wasm_v128_bitselect(vo5, vmax, wasm_f32x4_le(vo5, vmax));
+
+      wasm_v128_store(o5, vo5);
+      o5 += 4;
+      wasm_v128_store(o4, vo4);
+      o4 += 4;
+      wasm_v128_store(o3, vo3);
+      o3 += 4;
+      wasm_v128_store(o2, vo2);
+      o2 += 4;
+      wasm_v128_store(o1, vo1);
+      o1 += 4;
+      wasm_v128_store(o0, vo0);
+      o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
+      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
+      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
+      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
+      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
+      v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      v128_t vo5 = wasm_v128_bitselect(vmin, vo5p0, wasm_f32x4_lt(vo5p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+      vo5 = wasm_v128_bitselect(vo5, vmax, wasm_f32x4_le(vo5, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o5, vo5);
+        o5 += 4;
+        wasm_v128_store(o4, vo4);
+        o4 += 4;
+        wasm_v128_store(o3, vo3);
+        o3 += 4;
+        wasm_v128_store(o2, vo2);
+        o2 += 4;
+        wasm_v128_store(o1, vo1);
+        o1 += 4;
+        wasm_v128_store(o0, vo0);
+        o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o5) = wasm_f64x2_extract_lane(vo5, 0);
+          o5 += 2;
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
+          o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
+          o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
+          o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
+          o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
+          o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+          vo5 = wasm_v32x4_shuffle(vo5, vo5, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o5 = wasm_f32x4_extract_lane(vo5, 0);
+          o5 += 1;
+          *o4 = wasm_f32x4_extract_lane(vo4, 0);
+          o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0);
+          o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0);
+          o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0);
+          o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0);
+          o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o5;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+    o5 = (float*) ((uintptr_t) o4 + input_width);
+
+    output_height = doz(output_height, 6);
+  } while (output_height != 0);
+}
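A minimal scalar sketch of the 1..3-pixel tail store used by these kernels (illustrative only, not part of the patch; store_tail_f32x4 is a made-up helper name): the low two lanes go out as one 64-bit chunk, the vector is rotated so lane 2 becomes lane 0, and a final single float is written if needed.

#include <stddef.h>
#include <string.h>

static void store_tail_f32x4(float* o, const float v[4], size_t n /* 1..3 */) {
  size_t pos = 0;
  if (n & 2) {
    memcpy(o, v, 2 * sizeof(float));  // mirrors the f64x2 lane-0 store
    o += 2;
    pos = 2;                          // mirrors the 2,3,0,1 lane rotation
  }
  if (n & 1) {
    o[0] = v[pos];                    // mirrors the f32x4 lane-0 extract
  }
}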
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc2.c
new file mode 100644
index 0000000..74f9e78
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc2.c
@@ -0,0 +1,176 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
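A scalar, per-lane sketch of the minmax clamp these x86-flavored kernels express with wasm_v128_bitselect (illustrative only, not part of the patch): lanes below vmin are replaced by vmin, then lanes above vmax are replaced by vmax.

static inline float clamp_minmax(float x, float vmin, float vmax) {
  const float lo = (x < vmin) ? vmin : x;   // bitselect(vmin, x, f32x4_lt(x, vmin))
  return (lo <= vmax) ? lo : vmax;          // bitselect(lo, vmax, f32x4_le(lo, vmax))
}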
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc3.c
new file mode 100644
index 0000000..b8b3ce6
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc3.c
@@ -0,0 +1,178 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc4.c
new file mode 100644
index 0000000..0e4aba1
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4-acc4.c
@@ -0,0 +1,180 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4.c
new file mode 100644
index 0000000..79a12f0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-1x4.c
@@ -0,0 +1,174 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+  } while (--output_height != 0);
+}
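A scalar sketch of the two-operand lane shuffle used above to build the shifted input windows (illustrative only, not part of the patch): indices 0..3 select lanes of the first operand, 4..7 select lanes of the second. With a = {x4,x5,x6,x7}, b = {x8,x9,xA,xB} and indices 1,2,3,4 the result is {x5,x6,x7,x8}; with a = {x0..x3}, b = {x4..x7} and indices 3,4,5,6 it is {x3,x4,x5,x6}.

static void shuffle_f32x4(const float a[4], const float b[4],
                          const int idx[4], float out[4]) {
  for (int i = 0; i < 4; i++) {
    out[i] = (idx[i] < 4) ? a[idx[i]] : b[idx[i] - 4];  // idx[i] in 0..7
  }
}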
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4-acc2.c
new file mode 100644
index 0000000..929e64f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4-acc2.c
@@ -0,0 +1,227 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
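After each 2-row tile, the kernel above rebases the next tile's top input rows to the former i2/i3 by subtracting input_decrement (each pointer advances by exactly round_up_po2(input_width, 4 * sizeof(float)) over a row), rotates the output pointers, and drops the remaining row count with doz(). A sketch of that saturating decrement, assuming doz() from <xnnpack/math.h> means "decrement or zero" (illustrative only, not part of the patch):

#include <stddef.h>

static inline size_t doz_sketch(size_t a, size_t b) {
  return (a >= b) ? a - b : 0;  // never underflows; the outer loop exits when it reaches 0
}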
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4.c
new file mode 100644
index 0000000..61a53c5
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-2x4.c
@@ -0,0 +1,223 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-3x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-3x4.c
new file mode 100644
index 0000000..9267435
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-3x4.c
@@ -0,0 +1,270 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-4x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-4x4.c
new file mode 100644
index 0000000..83b50ca
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-4x4.c
@@ -0,0 +1,317 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-5x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-5x4.c
new file mode 100644
index 0000000..4cb8b4a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-5x4.c
@@ -0,0 +1,364 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+      vi6x0123 = vi6x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o4, vo4); o4 += 4;
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-6x4.c b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-6x4.c
new file mode 100644
index 0000000..5ff96d6
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3p1-minmax-wasmsimd-x86-splat-6x4.c
@@ -0,0 +1,411 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 1);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = input;
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+  float* o5 = (float*) ((uintptr_t) o4 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i2 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i3 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i4 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i5 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i6 = zero;
+      o5 = o4;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vi0x0123 = vi0x4567;
+      vi1x0123 = vi1x4567;
+      vi2x0123 = vi2x4567;
+      vi3x0123 = vi3x4567;
+      vi4x0123 = vi4x4567;
+      vi5x0123 = vi5x4567;
+      vi6x0123 = vi6x4567;
+      vi7x0123 = vi7x4567;
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vi0x4567 = vi0x89AB;
+      vi1x4567 = vi1x89AB;
+      vi2x4567 = vi2x89AB;
+      vi3x4567 = vi3x89AB;
+      vi4x4567 = vi4x89AB;
+      vi5x4567 = vi5x89AB;
+      vi6x4567 = vi6x89AB;
+      vi7x4567 = vi7x89AB;
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      v128_t vo5 = wasm_v128_bitselect(vmin, vo5p0, wasm_f32x4_lt(vo5p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+      vo5 = wasm_v128_bitselect(vo5, vmax, wasm_f32x4_le(vo5, vmax));
+
+      wasm_v128_store(o5, vo5); o5 += 4;
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 1..4 pixels.
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      v128_t vo5 = wasm_v128_bitselect(vmin, vo5p0, wasm_f32x4_lt(vo5p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+      vo5 = wasm_v128_bitselect(vo5, vmax, wasm_f32x4_le(vo5, vmax));
+
+      if XNN_LIKELY(w == 4 * sizeof(float)) {
+        wasm_v128_store(o5, vo5); o5 += 4;
+        wasm_v128_store(o4, vo4); o4 += 4;
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o5) = wasm_f64x2_extract_lane(vo5, 0); o5 += 2;
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+          vo5 = wasm_v32x4_shuffle(vo5, vo5, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o5 = wasm_f32x4_extract_lane(vo5, 0); o5 += 1;
+          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o5;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+    o5 = (float*) ((uintptr_t) o4 + input_width);
+
+    output_height = doz(output_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
new file mode 100644
index 0000000..b119d9c
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
@@ -0,0 +1,200 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
new file mode 100644
index 0000000..e2a4838
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
@@ -0,0 +1,202 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
new file mode 100644
index 0000000..2d874a1
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
@@ -0,0 +1,204 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c
new file mode 100644
index 0000000..27848af
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c
@@ -0,0 +1,198 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
new file mode 100644
index 0000000..41eb79a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
@@ -0,0 +1,272 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk01);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk01);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
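
Note on the tail handling in the kernel above: after the main loop, w holds the number of remaining input bytes (1 to 7 floats). Adding one float and testing the 8-, 4- and 2-float bits of the result decomposes ceil(remaining_pixels / 2), the number of outputs still owed per row, into a full-vector store, a two-lane store and a one-lane store. As a worked example (illustration only, not part of the patch): 5 remaining input pixels give w = 20 bytes; w + 4 = 24 = 16 + 8, so two lanes are stored, the vector is rotated with the 2, 3, 0, 1 shuffle, one more lane is stored, and the row receives 3 outputs, which is ceil(5/2).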
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4.c
new file mode 100644
index 0000000..a5cebb5
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4.c
@@ -0,0 +1,268 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
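
Note on the stride-2 kernels introduced above: each pair of loaded vectors is de-interleaved into even columns (the x8ACE registers) and odd columns (x9BDF), and the x7BDF shuffle recovers the column to the left of each even column, so every 3x3 tap multiplies a full vector of output pixels at once. The scalar arithmetic being vectorized reduces to the sketch below. This is a hypothetical reference routine written only for illustration, not part of the patch; it assumes padding_top == 1 plus one column of left padding (which is what the x7BDF construction implies) and the weights layout [bias, k00, k01, k02, k10, k11, k12, k20, k21, k22] read by the loadsplat code above.

    #include <stddef.h>

    // Hypothetical scalar reference for the 3x3, stride-2, padding-1 depthwise
    // convolution that the wasmsimd kernels above compute 4 outputs at a time.
    // Sizes are in elements (floats), not bytes.
    static void dwconv2d_chw_3x3s2p1_reference(
        size_t input_height,
        size_t input_width,
        const float* input,
        const float* weights,   // [bias, k00, k01, k02, k10, k11, k12, k20, k21, k22]
        float* output,
        float output_min,
        float output_max)
    {
      const size_t output_height = (input_height + 1) / 2;  // padding_top == 1
      const size_t output_width = (input_width + 1) / 2;    // 1 column of left padding
      for (size_t oy = 0; oy < output_height; oy++) {
        for (size_t ox = 0; ox < output_width; ox++) {
          float acc = weights[0];  // bias, mirrored by vbias in the vector code
          for (size_t ky = 0; ky < 3; ky++) {
            for (size_t kx = 0; kx < 3; kx++) {
              const ptrdiff_t iy = (ptrdiff_t) (2 * oy + ky) - 1;
              const ptrdiff_t ix = (ptrdiff_t) (2 * ox + kx) - 1;
              // Out-of-bounds taps read an implicit zero, like the `zero` row and
              // the vmask_even/vmask_odd masking in the vector kernels.
              if (iy >= 0 && iy < (ptrdiff_t) input_height &&
                  ix >= 0 && ix < (ptrdiff_t) input_width) {
                acc += weights[1 + 3 * ky + kx] * input[(size_t) iy * input_width + (size_t) ix];
              }
            }
          }
          // Same clamping order as the vector code: max with vmin, then min with vmax.
          acc = acc < output_min ? output_min : acc;
          acc = acc > output_max ? output_max : acc;
          output[oy * output_width + ox] = acc;
        }
      }
    }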
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-3x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-3x4.c
new file mode 100644
index 0000000..5d39b30
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-3x4.c
@@ -0,0 +1,336 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i5 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i6 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
+      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
+      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
+      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-4x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-4x4.c
new file mode 100644
index 0000000..e933258
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-loadsplat-4x4.c
@@ -0,0 +1,404 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+  float* o3 = (float*) ((uintptr_t) o2 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i5 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i7 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i8 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8x89AB = wasm_v128_load(i8);
+      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+      const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+      const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+      const v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+      const v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+      const v128_t vi8x89AB = wasm_v128_load(i8);
+      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
+      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
+      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
+      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
+      const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
+      const v128_t vi7x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));
+      const v128_t vi8x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6));
+      const v128_t vi8x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+    o3 = (float*) ((uintptr_t) o2 + output_width);
+
+    output_height = doz(output_height, 4);
+    padded_input_height = doz(padded_input_height, 8);
+  } while (output_height != 0);
+}
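
The 2x4, 3x4 and 4x4 variants above differ only in how many output rows each outer iteration produces: the input row pointers are rewound from the last consumed row by input_decrement, the output pointers rotate (o0 takes the position of the last row, and o1, o2, o3 are re-derived from it), and the remaining work is shrunk with doz(). The helper itself lives in xnnpack/math.h and is not shown in this patch; the behaviour the loops rely on is saturating subtraction, sketched below under a hypothetical name.

    // Hypothetical sketch of the "decrement or zero" behaviour these row loops
    // depend on: the remaining count never wraps below zero when fewer than the
    // tile height of rows is left.
    static inline size_t doz_sketch(size_t a, size_t b) {
      return a >= b ? a - b : 0;
    }

When the tail iteration has fewer real rows than the tile height, the padded_input_height checks at the top of the loop alias the surplus output pointers onto the last real row (o1 = o0, o2 = o1, o3 = o2) and point the surplus input rows at the zero buffer, so the redundant results are stored first and then overwritten by the real rows, whose stores come last.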
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
new file mode 100644
index 0000000..18b7c68
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
@@ -0,0 +1,190 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c
new file mode 100644
index 0000000..19b8f00
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c
@@ -0,0 +1,192 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c
new file mode 100644
index 0000000..d77d502
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c
@@ -0,0 +1,194 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c
new file mode 100644
index 0000000..4b419df
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c
@@ -0,0 +1,188 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4-acc2.c
new file mode 100644
index 0000000..993d54b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4-acc2.c
@@ -0,0 +1,262 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4.c
new file mode 100644
index 0000000..6a62c87
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-2x4.c
@@ -0,0 +1,258 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-3x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-3x4.c
new file mode 100644
index 0000000..407689f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-3x4.c
@@ -0,0 +1,326 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i5 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i6 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
+      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
+      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
+      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-4x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-4x4.c
new file mode 100644
index 0000000..b98974f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-arm-splat-4x4.c
@@ -0,0 +1,394 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+  float* o3 = (float*) ((uintptr_t) o2 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i5 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i7 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i8 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8x89AB = wasm_v128_load(i8);
+      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+      const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+      const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+      const v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+      const v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+      const v128_t vi8x89AB = wasm_v128_load(i8);
+      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
+      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
+      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
+      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
+      const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
+      const v128_t vi7x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));
+      const v128_t vi8x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6));
+      const v128_t vi8x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+    o3 = (float*) ((uintptr_t) o2 + output_width);
+
+    output_height = doz(output_height, 4);
+    padded_input_height = doz(padded_input_height, 8);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
new file mode 100644
index 0000000..4ff6f4e
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
@@ -0,0 +1,200 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
new file mode 100644
index 0000000..64986b8
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
@@ -0,0 +1,202 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
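All of the 3x3s2p1 wasmsimd variants added in this directory compute the same thing on one CHW channel plane: a 3x3 depthwise convolution with stride 2 and the usual "padding 1" scheme (padding_top rows of zeros on top, one implicit zero column on the left, and zero rows/columns on the bottom/right as needed), with weights[0] holding the bias, weights[1 + 3*ky + kx] holding the tap at (ky, kx), and the result clamped to the [min, max] range loaded from params. The scalar reference sketch below restates that contract; the function name and the explicit output_min/output_max parameters are illustrative, not part of the patch.

#include <math.h>
#include <stddef.h>
#include <stdint.h>

// Scalar reference for one CHW channel plane (sizes in pixels, not bytes).
// weights[0] is the bias, weights[1 + 3*ky + kx] is the tap at (ky, kx).
static void dwconv2d_chw_3x3s2p1_reference(
    size_t input_height, size_t input_width,
    const float* input, const float* weights, float* output,
    uint32_t padding_top,            // 0 or 1, as asserted by the kernels
    float output_min, float output_max)
{
  // Same output size as the kernels: padded height = H + padding_top + 1.
  const size_t padded_input_height = input_height + padding_top + 1;
  const size_t output_height = (padded_input_height - 1) / 2;
  const size_t output_width = (input_width + 1) / 2;
  for (size_t oy = 0; oy < output_height; oy++) {
    for (size_t ox = 0; ox < output_width; ox++) {
      float acc = weights[0];  // bias
      for (size_t ky = 0; ky < 3; ky++) {
        const ptrdiff_t iy = (ptrdiff_t) (oy * 2 + ky) - (ptrdiff_t) padding_top;
        for (size_t kx = 0; kx < 3; kx++) {
          const ptrdiff_t ix = (ptrdiff_t) (ox * 2 + kx) - 1;  // one column of left padding
          if (iy >= 0 && iy < (ptrdiff_t) input_height &&
              ix >= 0 && ix < (ptrdiff_t) input_width) {
            acc += input[(size_t) iy * input_width + (size_t) ix] * weights[1 + ky * 3 + kx];
          }
        }
      }
      // Same clamp order as the wasm_v128_bitselect pair: lower bound, then upper.
      acc = fmaxf(acc, output_min);
      acc = fminf(acc, output_max);
      output[oy * output_width + ox] = acc;
    }
  }
}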
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
new file mode 100644
index 0000000..d5cf4e0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
@@ -0,0 +1,204 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
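The -acc2/-acc3/-acc4 suffixes only change how the nine multiply-adds per output vector are scheduled: the products are spread over two, three or four independent partial sums (vo0p0 .. vo0p3 above) and folded together with a short reduction tree, which shortens the dependency chain between additions without changing the result. A minimal scalar sketch of the acc4 pattern, with a hypothetical function name:

// Nine taps accumulated into four independent partial sums, then reduced,
// mirroring the vo0p0..vo0p3 schedule of the acc4 kernel above.
static float dot9_acc4(const float x[9], const float k[9], float bias) {
  float p0 = bias;
  float p1 = x[0] * k[0];
  float p2 = x[1] * k[1];
  float p3 = x[2] * k[2];
  p1 += x[3] * k[3];
  p2 += x[4] * k[4];
  p3 += x[5] * k[5];
  p0 += x[6] * k[6];
  p1 += x[7] * k[7];
  p2 += x[8] * k[8];
  p0 += p1;   // reduction tree
  p2 += p3;
  return p0 + p2;
}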
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c
new file mode 100644
index 0000000..df636a4
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c
@@ -0,0 +1,198 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
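Because the convolution strides by 2, each vector of 4 output pixels needs three sets of input columns: the even columns 8,A,C,E for the centre taps (vk?1), the odd columns 9,B,D,F for the right taps (vk?2), and the left neighbours 7,9,B,D for the left taps (vk?0). The kernels build the last set by carrying the previous block's odd columns in the vi?x1357 registers and shuffling them against the current odd columns with lane pattern {3, 4, 5, 6}. Below is a minimal sketch of just that deinterleave-and-carry step, assuming a wasm target built with SIMD enabled; the helper name and out-parameters are illustrative:

#include <wasm_simd128.h>

// Splits 8 consecutive pixels starting at column 8 into even lanes (8,A,C,E)
// and odd lanes (9,B,D,F), and combines the previous block's odd lanes
// (*vcarry, columns 1,3,5,7) with the new odd lanes to form the left
// neighbours 7,9,B,D of the even columns.
static void deinterleave_stride2(
    const float* row, v128_t* vcarry,
    v128_t* veven, v128_t* vodd, v128_t* vleft)
{
  const v128_t v89AB = wasm_v128_load(row);
  const v128_t vCDEF = wasm_v128_load(row + 4);
  *veven = wasm_v32x4_shuffle(v89AB, vCDEF, 0, 2, 4, 6);    // 8,A,C,E
  *vodd  = wasm_v32x4_shuffle(v89AB, vCDEF, 1, 3, 5, 7);    // 9,B,D,F
  *vleft = wasm_v32x4_shuffle(*vcarry, *vodd, 3, 4, 5, 6);  // 7,9,B,D
  *vcarry = *vodd;                                          // carry for the next block
}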
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
new file mode 100644
index 0000000..957887d
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
@@ -0,0 +1,272 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk01);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk01);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
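The 2x4 variants produce two output rows per pass from five input rows: output row n reads i0, i1, i2 and output row n+1 reads i2, i3, i4, so the middle row is loaded once and reused. When fewer rows remain, the out-of-range input rows are redirected to the caller-provided zero row and o1 is aliased onto o0; since vo1 is stored before vo0, the real row overwrites the dummy one at the same address. The row counters are then stepped with doz() from <xnnpack/math.h>, so an odd trailing row count still reaches zero instead of wrapping. A minimal sketch of that bookkeeping, under the assumption that doz() is a saturating "difference or zero" subtraction:

#include <stddef.h>

// Saturating subtraction: clamps at zero instead of wrapping, so
// "output_height = doz(output_height, 2)" terminates the do/while loop even
// when an odd number of output rows is left (e.g. 5 -> 3 -> 1 -> 0).
static size_t doz_sketch(size_t minuend, size_t subtrahend) {
  return minuend >= subtrahend ? minuend - subtrahend : 0;
}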
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4.c
new file mode 100644
index 0000000..d31908d
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4.c
@@ -0,0 +1,268 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
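Between passes each input pointer is rebased rather than recomputed from a row index: the main loop advances a row pointer by 8 floats per iteration and the remainder block leaves it alone, so after a pass the pointer has moved by exactly round_down_po2(input_width, 8 * sizeof(float)) bytes, which is the precomputed input_decrement. Subtracting it from the last input row pointer (i2 in the 1x4 kernels, i4 here) lands on the start of the next pass's top row. A small sketch of that arithmetic, with an illustrative helper name and assuming round_down_po2(n, q) rounds n down to a multiple of the power-of-two q:

#include <stddef.h>
#include <stdint.h>

// Rebases a row pointer that the main loop advanced in whole 8-float blocks
// back to the start of its row; q matches the kernels' input_decrement stride.
static const float* rebase_row(const float* advanced, size_t input_width) {
  const size_t q = 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float);
  const size_t input_decrement = input_width & ~(q - 1);  // round_down_po2(input_width, q)
  return (const float*) ((uintptr_t) advanced - input_decrement);
}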
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-3x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-3x4.c
new file mode 100644
index 0000000..74a3427
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-3x4.c
@@ -0,0 +1,336 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i5 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i6 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
+      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
+      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
+      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
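For reference, the arithmetic that these generated kernels vectorize is an ordinary 3x3, stride-2 depthwise convolution over a single CHW channel, with the accumulator clamped to [min, max]. A minimal scalar sketch, assuming padding_top == 1, widths given in elements rather than bytes, and a hypothetical function name that is not part of the XNNPACK API:

#include <stddef.h>
#include <math.h>

/* Weights layout matches the kernels above: weights[0] is the bias,
 * weights[1..9] are the 3x3 taps in row-major (kh, kw) order. */
static void dwconv2d_chw_3x3s2p1_ref(
    size_t input_height, size_t input_width,   /* in elements, one channel */
    const float* input,                        /* [input_height][input_width] */
    const float* weights,
    float* output,
    float output_min, float output_max)
{
  const float bias = weights[0];
  const float* k = weights + 1;
  const size_t output_height = (input_height + 1) / 2;  /* pad top = pad bottom = 1 */
  const size_t output_width  = (input_width + 1) / 2;   /* pad left = 1 */
  for (size_t oy = 0; oy < output_height; oy++) {
    for (size_t ox = 0; ox < output_width; ox++) {
      float acc = bias;
      for (size_t kh = 0; kh < 3; kh++) {
        for (size_t kw = 0; kw < 3; kw++) {
          const ptrdiff_t iy = (ptrdiff_t) (oy * 2 + kh) - 1;  /* stride 2, pad 1 */
          const ptrdiff_t ix = (ptrdiff_t) (ox * 2 + kw) - 1;
          if (iy >= 0 && iy < (ptrdiff_t) input_height &&
              ix >= 0 && ix < (ptrdiff_t) input_width) {
            acc += input[(size_t) iy * input_width + (size_t) ix] * k[3 * kh + kw];
          }
        }
      }
      acc = fmaxf(acc, output_min);  /* lower clamp first, as in the kernels */
      acc = fminf(acc, output_max);
      output[oy * output_width + ox] = acc;
    }
  }
}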
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-4x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-4x4.c
new file mode 100644
index 0000000..4284629
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-loadsplat-4x4.c
@@ -0,0 +1,404 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
+  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+  float* o3 = (float*) ((uintptr_t) o2 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i5 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i7 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i8 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8x89AB = wasm_v128_load(i8);
+      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+      const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+      const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+      const v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+      const v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+      const v128_t vi8x89AB = wasm_v128_load(i8);
+      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
+      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
+      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
+      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
+      const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
+      const v128_t vi7x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));
+      const v128_t vi8x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6));
+      const v128_t vi8x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, vk21));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, vk22));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+    o3 = (float*) ((uintptr_t) o2 + output_width);
+
+    output_height = doz(output_height, 4);
+    padded_input_height = doz(padded_input_height, 8);
+  } while (output_height != 0);
+}
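The clamp at the end of each block is written with comparison masks and wasm_v128_bitselect instead of float min/max intrinsics; wasm_v128_bitselect(a, b, mask) takes bits from a where the mask lane is all-ones (the lanes where the comparison held) and from b elsewhere. Per lane the two selects reduce to a plain lower-then-upper clamp; a scalar sketch of the equivalence, with a hypothetical helper name:

/* Per-lane meaning of
 *   vo = wasm_v128_bitselect(vmin, vacc, wasm_f32x4_lt(vacc, vmin));
 *   vo = wasm_v128_bitselect(vo,   vmax, wasm_f32x4_le(vo,   vmax));
 */
static inline float clamp_lane(float acc, float min, float max) {
  float out = (acc < min) ? min : acc;  /* lanes below min are replaced by min */
  out = (out <= max) ? out : max;       /* lanes above max are replaced by max */
  return out;
}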
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c
new file mode 100644
index 0000000..803f188
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c
@@ -0,0 +1,190 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
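The -acc2, -acc3 and -acc4 splat variants compute exactly the same nine multiply-adds as the plain 1x4 kernel; they only distribute them over independent partial accumulators (vo0p0, vo0p1, ...) that are summed once at the end, shortening the floating-point add dependency chain. A scalar sketch of the two-accumulator split, using a hypothetical 9-tap helper:

/* Accumulator splitting as in the *-acc2 variants: products alternate between
 * two partial sums and are combined once, mirroring vo0p0 += vo0p1. */
static inline float dot9_acc2(const float x[9], const float k[9], float bias) {
  float acc0 = bias;
  float acc1 = x[0] * k[0];
  acc0 += x[1] * k[1];  acc1 += x[2] * k[2];
  acc0 += x[3] * k[3];  acc1 += x[4] * k[4];
  acc0 += x[5] * k[5];  acc1 += x[6] * k[6];
  acc0 += x[7] * k[7];  acc1 += x[8] * k[8];
  return acc0 + acc1;
}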
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c
new file mode 100644
index 0000000..3c9b56b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c
@@ -0,0 +1,192 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
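In the remainder block the kernels still issue two full 4-float loads per row and rely on the deinterleaving shuffles plus vmask_even/vmask_odd to zero out any columns that lie past the end of the row, so the over-read lanes contribute nothing to the accumulators. A scalar sketch of that masked even/odd split, with a hypothetical helper and 'valid' standing for the number of remaining row elements (1..7):

#include <stddef.h>

static void split_even_odd_masked(const float in[8], size_t valid,
                                  float even[4], float odd[4]) {
  for (size_t j = 0; j < 4; j++) {
    const size_t e = 2 * j;      /* even column within the 8-pixel block */
    const size_t o = 2 * j + 1;  /* odd column within the 8-pixel block */
    even[j] = (e < valid) ? in[e] : 0.0f;  /* masked like vmask_even */
    odd[j]  = (o < valid) ? in[o] : 0.0f;  /* masked like vmask_odd */
  }
}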
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c
new file mode 100644
index 0000000..d63e08c
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c
@@ -0,0 +1,194 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
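The vectors fed to the left filter column (the taps in vw0123 lane 1, vw4567 lane 0 and vw4567 lane 3) hold, for each of the four even output columns in a block, the pixel one column to its left: the last odd pixel carried over from the previous block (lane 3 of vi*x1357, zero at the start of a row) followed by the first three odd pixels of the current block, which is exactly what the shuffle with indices 3, 4, 5, 6 produces. A scalar sketch with a hypothetical helper:

static void gather_left_neighbors(const float prev_odd[4], const float cur_odd[4],
                                  float left[4]) {
  left[0] = prev_odd[3];  /* carried over from the previous block, or zero at the row start */
  left[1] = cur_odd[0];
  left[2] = cur_odd[1];
  left[3] = cur_odd[2];
}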
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c
new file mode 100644
index 0000000..d7fb905
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c
@@ -0,0 +1,188 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4-acc2.c
new file mode 100644
index 0000000..d10514b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4-acc2.c
@@ -0,0 +1,262 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
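For orientation, the auto-generated 3x3s2p1 kernels in this patch all evaluate the same computation: each loaded row is deinterleaved into even columns (x8ACE) and odd columns (x9BDF) with shuffles, the odd lanes carried over from the previous block (x1357) supply the left neighbours (x7BDF), and the bias plus nine taps live in vw0123/vw4567/vw89; even columns are multiplied by the centre tap of each kernel row, x7BDF by the left tap, and x9BDF by the right tap. A minimal scalar model of that computation is sketched below; it is an illustrative reference only, not produced by tools/xngen, and it assumes padding_top == 1, input_width counted in elements (the kernels above take it in bytes), and the weight layout visible in the code (bias followed by the nine taps in row-major order).

#include <stddef.h>
#include <math.h>

// Hypothetical scalar reference for the 3x3, stride-2, padding-1 CHW
// depthwise convolution computed by the WAsm SIMD kernels in this patch.
static void dwconv2d_chw_3x3s2p1_reference(
    size_t input_height, size_t input_width,
    const float* input, const float* weights,
    float* output, float output_min, float output_max)
{
  const size_t output_height = (input_height + 1) / 2;
  const size_t output_width = (input_width + 1) / 2;
  for (size_t oy = 0; oy < output_height; oy++) {
    for (size_t ox = 0; ox < output_width; ox++) {
      float acc = weights[0];  // bias
      for (int ky = 0; ky < 3; ky++) {
        for (int kx = 0; kx < 3; kx++) {
          const ptrdiff_t iy = (ptrdiff_t) (2 * oy) + ky - 1;  // padding_top == 1
          const ptrdiff_t ix = (ptrdiff_t) (2 * ox) + kx - 1;  // left padding == 1
          if (iy >= 0 && iy < (ptrdiff_t) input_height &&
              ix >= 0 && ix < (ptrdiff_t) input_width) {
            acc += input[(size_t) iy * input_width + (size_t) ix] * weights[1 + ky * 3 + kx];
          }
        }
      }
      // Same clamping the SIMD code performs with the vmin/vmax bitselects.
      acc = fmaxf(acc, output_min);
      acc = fminf(acc, output_max);
      output[oy * output_width + ox] = acc;
    }
  }
}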
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4.c
new file mode 100644
index 0000000..4672a86
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-2x4.c
@@ -0,0 +1,258 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-3x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-3x4.c
new file mode 100644
index 0000000..3c009b8
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-3x4.c
@@ -0,0 +1,326 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i5 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i6 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
+      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
+      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
+      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
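The 2x4/3x4/4x4 variants differ from the single-row kernel only in output-row tiling: each pass computes up to two, three or four output rows (o0..oN) from the corresponding input rows, redirects any out-of-range input row to zero and any out-of-range output row to the previous one, then rotates the row pointers and shrinks the remaining counts with doz() ("difference or zero"), which saturates the subtraction at zero so the final short pass cannot underflow. A plausible one-line sketch of that helper (the kernels take the real definition from the included xnnpack/math.h):

static inline size_t doz(size_t a, size_t b) {
  // Saturating subtraction: a - b when a >= b, otherwise 0.
  return a >= b ? a - b : 0;
}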
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-4x4.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-4x4.c
new file mode 100644
index 0000000..2965c1e
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/3x3s2p1-minmax-wasmsimd-x86-splat-4x4.c
@@ -0,0 +1,394 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 0);
+  assert(padding_top <= 1);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
+  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
+  if XNN_UNPREDICTABLE(padding_top != 0) {
+    i0 = zero;
+  }
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+  float* o3 = (float*) ((uintptr_t) o2 + output_width);
+
+  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
+  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 4) {
+      i2 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 5) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i5 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i7 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i8 = zero;
+    }
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    size_t w = input_width;
+    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8x89AB = wasm_v128_load(i8);
+      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+      const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+      const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+      const v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+      const v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
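+      // Clamp the four output-row accumulators to [vmin, vmax] via bitselect (pseudo-min/max).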
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 0-7 pixels to process.
+    assert(w < 8 * sizeof(float));
+    if XNN_LIKELY(w != 0) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0);
+      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+      const v128_t vi1x89AB = wasm_v128_load(i1);
+      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+      const v128_t vi2x89AB = wasm_v128_load(i2);
+      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+      const v128_t vi3x89AB = wasm_v128_load(i3);
+      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+      const v128_t vi4x89AB = wasm_v128_load(i4);
+      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+      const v128_t vi5x89AB = wasm_v128_load(i5);
+      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+      const v128_t vi6x89AB = wasm_v128_load(i6);
+      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+      const v128_t vi7x89AB = wasm_v128_load(i7);
+      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+      const v128_t vi8x89AB = wasm_v128_load(i8);
+      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+
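+      // Deinterleave columns into even (x8ACE) and odd (x9BDF) halves for the stride-2 window,
+      // zeroing lanes past the row end with vmask_even / vmask_odd.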
+      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
+      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
+      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
+      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
+      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
+      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
+      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
+      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
+      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
+      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
+      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
+      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
+      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
+      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
+      const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
+      const v128_t vi7x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));
+      const v128_t vi8x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6));
+      const v128_t vi8x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
+
+      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
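+      // w is the remaining input width in bytes; after adding one float, w / (2 floats) gives the
+      // number of valid stride-2 outputs, and the bit tests below store 4, 2, and 1 of them accordingly.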
+      w += 1 * sizeof(float);
+      if (w & (8 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (4 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (2 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i0 + input_width);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+    o3 = (float*) ((uintptr_t) o2 + output_width);
+
+    output_height = doz(output_height, 4);
+    padded_input_height = doz(padded_input_height, 8);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-arm.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-arm.c
deleted file mode 100644
index 8b1c61a..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-arm.c
+++ /dev/null
@@ -1,199 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-1x4-acc3.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top >= 0);
-  assert(padding_top <= 1);
-
-  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
-  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
-
-  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
-  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
-  if XNN_UNPREDICTABLE(padding_top != 0) {
-    i0 = zero;
-  }
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
-  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
-  do {
-    if XNN_UNPREDICTABLE(padded_input_height <= 3) {
-      i2 = zero;
-    }
-
-    v128_t vi0x7531 = vzero;
-    v128_t vi1x7531 = vzero;
-    v128_t vi2x7531 = vzero;
-
-    size_t w = input_width;
-    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
-      v128_t vo8ACEp0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      i0 += 8;
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      i1 += 8;
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 4 + 1, 4 + 3);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x8ACE, vk01));
-      v128_t vo8ACEp1 = wasm_f32x4_mul(vi1x8ACE, vk11);
-      v128_t vo8ACEp2 = wasm_f32x4_mul(vi2x8ACE, vk21);
-
-      const v128_t vi0xF9BD = wasm_v32x4_shuffle(vi0x9BDF, vi0x9BDF, 3, 0, 1, 2);
-      const v128_t vi1xF9BD = wasm_v32x4_shuffle(vi1x9BDF, vi1x9BDF, 3, 0, 1, 2);
-      const v128_t vi2xF9BD = wasm_v32x4_shuffle(vi2x9BDF, vi2x9BDF, 3, 0, 1, 2);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x9BDF, vk02));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x9BDF, vk12));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x9BDF, vk22));
-
-      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0xF9BD, vi0x7531, 4, 1, 2, 3);
-      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1xF9BD, vi1x7531, 4, 1, 2, 3);
-      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2xF9BD, vi2x7531, 4, 1, 2, 3);
-
-      vi0x7531 = vi0xF9BD;
-      vi1x7531 = vi1xF9BD;
-      vi2x7531 = vi2xF9BD;
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x7BDF, vk00));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x7BDF, vk10));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x7BDF, vk20));
-
-      v128_t vo = wasm_f32x4_add(vo8ACEp0, vo8ACEp1);
-      vo = wasm_f32x4_add(vo, vo8ACEp2);
-
-      vo = wasm_f32x4_max(vo, vmin);
-      vo = wasm_f32x4_min(vo, vmax);
-
-      wasm_v128_store(output, vo);
-      output += 4;
-    }
-    // Potentially process the last block of 0..7 pixels.
-    assert(w < 8 * sizeof(float));
-    if XNN_LIKELY(w != 0) {
-      v128_t vo8ACEp0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-
-      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 4 + 1, 4 + 3));
-      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 4 + 1, 4 + 3));
-      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 4 + 1, 4 + 3));
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x8ACE, vk01));
-      v128_t vo8ACEp1 = wasm_f32x4_mul(vi1x8ACE, vk11);
-      v128_t vo8ACEp2 = wasm_f32x4_mul(vi2x8ACE, vk21);
-
-      const v128_t vi0xF9BD = wasm_v32x4_shuffle(vi0x9BDF, vi0x9BDF, 3, 0, 1, 2);
-      const v128_t vi1xF9BD = wasm_v32x4_shuffle(vi1x9BDF, vi1x9BDF, 3, 0, 1, 2);
-      const v128_t vi2xF9BD = wasm_v32x4_shuffle(vi2x9BDF, vi2x9BDF, 3, 0, 1, 2);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x9BDF, vk02));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x9BDF, vk12));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x9BDF, vk22));
-
-      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0xF9BD, vi0x7531, 4, 1, 2, 3);
-      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1xF9BD, vi1x7531, 4, 1, 2, 3);
-      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2xF9BD, vi2x7531, 4, 1, 2, 3);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x7BDF, vk00));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x7BDF, vk10));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x7BDF, vk20));
-
-      v128_t vo = wasm_f32x4_add(vo8ACEp0, vo8ACEp1);
-      vo = wasm_f32x4_add(vo, vo8ACEp2);
-
-      vo = wasm_f32x4_max(vo, vmin);
-      vo = wasm_f32x4_min(vo, vmax);
-
-      if (w == 7 * sizeof(float)) {
-        wasm_v128_store(output, vo);
-        output += 4;
-      } else {
-        w += 1 * sizeof(float);
-        if (w & (4 * sizeof(float))) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo, 0);
-          output += 2;
-          vo = wasm_v32x4_shuffle(vo, vo, 2, 3, 0, 1);
-        }
-        if (w & (2 * sizeof(float))) {
-          *output = wasm_f32x4_extract_lane(vo, 0);
-          output += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i0 + input_width);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-    output_height -= 1;
-    padded_input_height -= 2;
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-x86.c b/src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-x86.c
deleted file mode 100644
index 5d91189..0000000
--- a/src/f32-dwconv2d-chw/gen/3x3s2p1-wasmsimd-1x4-acc3-x86.c
+++ /dev/null
@@ -1,199 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-1x4-acc3.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top >= 0);
-  assert(padding_top <= 1);
-
-  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
-  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 9);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
-
-  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
-  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
-  if XNN_UNPREDICTABLE(padding_top != 0) {
-    i0 = zero;
-  }
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
-  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
-  do {
-    if XNN_UNPREDICTABLE(padded_input_height <= 3) {
-      i2 = zero;
-    }
-
-    v128_t vi0x7531 = vzero;
-    v128_t vi1x7531 = vzero;
-    v128_t vi2x7531 = vzero;
-
-    size_t w = input_width;
-    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
-      v128_t vo8ACEp0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      i0 += 8;
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      i1 += 8;
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-      i2 += 8;
-
-      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 4 + 1, 4 + 3);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x8ACE, vk01));
-      v128_t vo8ACEp1 = wasm_f32x4_mul(vi1x8ACE, vk11);
-      v128_t vo8ACEp2 = wasm_f32x4_mul(vi2x8ACE, vk21);
-
-      const v128_t vi0xF9BD = wasm_v32x4_shuffle(vi0x9BDF, vi0x9BDF, 3, 0, 1, 2);
-      const v128_t vi1xF9BD = wasm_v32x4_shuffle(vi1x9BDF, vi1x9BDF, 3, 0, 1, 2);
-      const v128_t vi2xF9BD = wasm_v32x4_shuffle(vi2x9BDF, vi2x9BDF, 3, 0, 1, 2);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x9BDF, vk02));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x9BDF, vk12));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x9BDF, vk22));
-
-      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0xF9BD, vi0x7531, 4, 1, 2, 3);
-      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1xF9BD, vi1x7531, 4, 1, 2, 3);
-      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2xF9BD, vi2x7531, 4, 1, 2, 3);
-
-      vi0x7531 = vi0xF9BD;
-      vi1x7531 = vi1xF9BD;
-      vi2x7531 = vi2xF9BD;
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x7BDF, vk00));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x7BDF, vk10));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x7BDF, vk20));
-
-      v128_t vo = wasm_f32x4_add(vo8ACEp0, vo8ACEp1);
-      vo = wasm_f32x4_add(vo, vo8ACEp2);
-
-      vo = wasm_v128_bitselect(vmin, vo, wasm_f32x4_lt(vo, vmin));
-      vo = wasm_v128_bitselect(vo, vmax, wasm_f32x4_le(vo, vmax));
-
-      wasm_v128_store(output, vo);
-      output += 4;
-    }
-    // Potentially process the last block of 0..7 pixels.
-    assert(w < 8 * sizeof(float));
-    if XNN_LIKELY(w != 0) {
-      v128_t vo8ACEp0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-
-      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 4 + 1, 4 + 3));
-      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 4 + 1, 4 + 3));
-      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4 + 0, 4 + 2));
-      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 4 + 1, 4 + 3));
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x8ACE, vk01));
-      v128_t vo8ACEp1 = wasm_f32x4_mul(vi1x8ACE, vk11);
-      v128_t vo8ACEp2 = wasm_f32x4_mul(vi2x8ACE, vk21);
-
-      const v128_t vi0xF9BD = wasm_v32x4_shuffle(vi0x9BDF, vi0x9BDF, 3, 0, 1, 2);
-      const v128_t vi1xF9BD = wasm_v32x4_shuffle(vi1x9BDF, vi1x9BDF, 3, 0, 1, 2);
-      const v128_t vi2xF9BD = wasm_v32x4_shuffle(vi2x9BDF, vi2x9BDF, 3, 0, 1, 2);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x9BDF, vk02));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x9BDF, vk12));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x9BDF, vk22));
-
-      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0xF9BD, vi0x7531, 4, 1, 2, 3);
-      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1xF9BD, vi1x7531, 4, 1, 2, 3);
-      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2xF9BD, vi2x7531, 4, 1, 2, 3);
-
-      vo8ACEp0 = wasm_f32x4_add(vo8ACEp0, wasm_f32x4_mul(vi0x7BDF, vk00));
-      vo8ACEp1 = wasm_f32x4_add(vo8ACEp1, wasm_f32x4_mul(vi1x7BDF, vk10));
-      vo8ACEp2 = wasm_f32x4_add(vo8ACEp2, wasm_f32x4_mul(vi2x7BDF, vk20));
-
-      v128_t vo = wasm_f32x4_add(vo8ACEp0, vo8ACEp1);
-      vo = wasm_f32x4_add(vo, vo8ACEp2);
-
-      vo = wasm_v128_bitselect(vmin, vo, wasm_f32x4_lt(vo, vmin));
-      vo = wasm_v128_bitselect(vo, vmax, wasm_f32x4_le(vo, vmax));
-
-      if (w == 7 * sizeof(float)) {
-        wasm_v128_store(output, vo);
-        output += 4;
-      } else {
-        w += 1 * sizeof(float);
-        if (w & (4 * sizeof(float))) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo, 0);
-          output += 2;
-          vo = wasm_v32x4_shuffle(vo, vo, 2, 3, 0, 1);
-        }
-        if (w & (2 * sizeof(float))) {
-          *output = wasm_f32x4_extract_lane(vo, 0);
-          output += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i0 + input_width);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-
-    output_height -= 1;
-    padded_input_height -= 2;
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
new file mode 100644
index 0000000..ed07994
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
@@ -0,0 +1,427 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
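+  // weights[0] is the bias; weights[1..25] are the 5x5 taps in row-major order, each broadcast to all lanes below.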
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
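+  // Per output row, the row pointers advance by input_width rounded up to a 4-float tile;
+  // input_decrement rewinds them to the start of their rows when stepping to the next output row.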
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
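+  // padding_top == 2: the first two rows of the 5x5 window read from the zero buffer.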
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
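+    // Final block of 1..4 pixels: lanes beyond the row end in vi*x4567 are zeroed by vmask below.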
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
new file mode 100644
index 0000000..e1c02be
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
@@ -0,0 +1,430 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
new file mode 100644
index 0000000..ddb7b16
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
@@ -0,0 +1,433 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
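+  // The bias and all 25 kernel taps (k00..k44) are pre-broadcast into one vector each,
+  // so every lane of a 4-wide output block multiplies the same scalar weight ("loadsplat").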
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
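+    // Main loop: emit 4 output pixels per iteration while more than 8 input pixels remain,
+    // shuffling each row's previous/current/next vectors into the five horizontal taps.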
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
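+    // The look-ahead vector may extend past the end of the row, so vmask zeroes its
+    // out-of-bounds lanes before they enter the multiply-adds.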
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
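+    // Tail of 1..4 pixels: mask the remaining lanes, substitute vzero for the taps to the
+    // right of the row, and store the result as 4, 2 and/or 1 floats depending on w.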
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
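+    // Advance one output row: rewinding i1/i2 by input_decrement recovers their row starts,
+    // which become i0/i1 for the next iteration; the lower rows follow at input_width strides.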
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c
new file mode 100644
index 0000000..80e8a4a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c
@@ -0,0 +1,436 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
+
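+      // Fold the five partial accumulators back into vo0p0 before clamping.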
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4.c
new file mode 100644
index 0000000..a4ad75d
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-1x4.c
@@ -0,0 +1,424 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
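+      // Single-accumulator variant: every tap's multiply-add extends the one vo0p0 chain
+      // instead of being split across several partial sums.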
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
new file mode 100644
index 0000000..eb3a541
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
@@ -0,0 +1,552 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c
new file mode 100644
index 0000000..566db9d
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c
@@ -0,0 +1,558 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4.c
new file mode 100644
index 0000000..f02273b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-2x4.c
@@ -0,0 +1,546 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c
new file mode 100644
index 0000000..057e2ee
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c
@@ -0,0 +1,675 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4.c
new file mode 100644
index 0000000..099f43b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-3x4.c
@@ -0,0 +1,666 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4-acc2.c
new file mode 100644
index 0000000..2c7820e
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4-acc2.c
@@ -0,0 +1,798 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Process the last block of 5..8 pixels when more than 4 remain.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
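+    // Final 1..4 pixels of the row: zero the columns past the end with vmask, and
+    // pad the right edge with zeros via the shuffles against vzero below.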
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
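+      // Store the remaining 1..4 outputs of each row: a full vector when exactly
+      // 4 remain, otherwise a pair of lanes and/or a single lane.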
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
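+    // Slide the input window down four rows (the old i4 row becomes the new i0)
+    // and advance the output pointers to the next group of four rows.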
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4.c
new file mode 100644
index 0000000..c758592
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-4x4.c
@@ -0,0 +1,786 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
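+  // weights[0] is the bias and weights[1..25] are the 5x5 filter taps in
+  // row-major order; broadcast each one to all four lanes.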
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
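+  // Eight input row pointers: the first two start at the zero row (top padding
+  // of 2), the rest cover the 5-tap vertical window of the four output rows.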
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
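+    // Near the bottom edge, substitute the zero row for missing input rows and
+    // alias the spare output pointers; stores run o3 down to o0, so the valid
+    // row is written last.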
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+
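+    // Columns to the left of the row start out as zeros (implicit left padding of 2).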
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
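+    // Main loop: produce 4 output pixels in each of the 4 rows per iteration
+    // while more than 8 input pixels remain.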
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
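+      // Kernel column 2 (the center column) multiplies the aligned x4567 vectors.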
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
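+      // x3456: the window shifted one column left, for kernel column 1.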
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+
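+      // x2345: shifted two columns left, for kernel column 0; the current block
+      // is saved as x0123 for the next iteration.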
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
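+      // x5678: shifted one column right, for kernel column 3.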
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+
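+      // x6789: shifted two columns right, for kernel column 4; x4567 is advanced
+      // to the next block (x89AB).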
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+
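+      // Clamp the accumulators to the [min, max] output range.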
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Process the last block of 5..8 pixels when more than 4 remain.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
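+    // Final 1..4 pixels of the row: zero the columns past the end with vmask and
+    // pad the right edge with zeros via the shuffles against vzero below.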
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-5x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-5x4.c
new file mode 100644
index 0000000..771cded
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-loadsplat-5x4.c
@@ -0,0 +1,906 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+    v128_t vi8x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+    v128_t vi8x4567 = wasm_v128_load(i8); i8 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+      v128_t vo4p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+      const v128_t vi8x89AB = wasm_v128_load(i8); i8 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+      vi8x0123 = vi8x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
+      vi8x4567 = vi8x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+      v128_t vo4p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+      v128_t vi8x89AB = wasm_v128_load(i8); i8 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+      vi8x89AB = wasm_v128_and(vmask, vi8x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+      vi8x0123 = vi8x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
+      vi8x4567 = vi8x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+      v128_t vo4p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+      vi8x4567 = wasm_v128_and(vmask, vi8x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o4, vo4); o4 += 4;
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc2.c
new file mode 100644
index 0000000..dc74da2
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc2.c
@@ -0,0 +1,401 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc3.c
new file mode 100644
index 0000000..bf166a0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc3.c
@@ -0,0 +1,404 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc4.c
new file mode 100644
index 0000000..ffd949d
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc4.c
@@ -0,0 +1,407 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
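
Reader's note on the generated kernels above and below: the *-accN variants differ only in how many partial accumulators (vo0p0, vo0p1, ...) the 25 tap products are spread across before they are reassociated into one sum and clamped to [min, max]. A minimal scalar sketch of that accumulation pattern for the 4-accumulator case that ends above; all names are illustrative and not part of XNNPACK, and the exact product-to-partial assignment is approximated.

    /* Illustrative only -- not part of the generated sources or the XNNPACK API. */
    static float dwconv5x5_acc4_scalar(float bias, const float taps[25],
                                       const float window[25],
                                       float out_min, float out_max) {
      float p0 = bias;                 /* the bias seeds the first partial sum (vo0p0) */
      float p1 = 0.0f, p2 = 0.0f, p3 = 0.0f;
      for (int k = 0; k < 25; k++) {
        switch (k % 4) {               /* round-robin the 25 products over 4 partials */
          case 0: p1 += taps[k] * window[k]; break;
          case 1: p2 += taps[k] * window[k]; break;
          case 2: p3 += taps[k] * window[k]; break;
          default: p0 += taps[k] * window[k]; break;
        }
      }
      float acc = (p0 + p1) + (p2 + p3);   /* same pairwise reassociation as the kernel */
      acc = acc < out_min ? out_min : acc;
      return acc > out_max ? out_max : acc;
    }
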
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc5.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc5.c
new file mode 100644
index 0000000..638032f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4-acc5.c
@@ -0,0 +1,410 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
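
Reader's note: in every kernel in this set the remaining column count w is tracked in bytes, so the single-row tail block above masks the last partial input vector with vmask and then stores 4, 2 and/or 1 output floats depending on which bits of w are set (the lane shuffle after the 2-float store plays the role of the index advance below). A hedged scalar sketch of that partial-store logic, with hypothetical names:

    /* Illustrative only -- mirrors the tail-store pattern, not the XNNPACK API. */
    #include <stddef.h>
    #include <string.h>

    static float* store_tail(float* o, const float vo[4], size_t w_bytes) {
      if (w_bytes & (4 * sizeof(float))) {        /* 4 pixels remain: full store */
        memcpy(o, vo, 4 * sizeof(float)); o += 4;
      } else {
        size_t idx = 0;
        if (w_bytes & (2 * sizeof(float))) {      /* 2 (or 3) pixels remain */
          memcpy(o, &vo[idx], 2 * sizeof(float)); o += 2; idx += 2;
        }
        if (w_bytes & (1 * sizeof(float))) {      /* 1 (or 3) pixels remain */
          *o = vo[idx]; o += 1;
        }
      }
      return o;
    }
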
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4.c
new file mode 100644
index 0000000..0bad9f5
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-1x4.c
@@ -0,0 +1,398 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
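
Reader's note: as a correctness reference, each of the generated 5x5p2 CHW kernels in this diff computes, per channel, a 5x5 depthwise convolution with 2 pixels of implicit zero padding on every side, a "same"-size output, and a final [min, max] clamp; weights[0] is the bias and weights[1..25] are the taps in row-major order. A plain scalar sketch under those assumptions (dimensions are in elements here, whereas the kernels take input_width in bytes; the function name is illustrative):

    /* Illustrative reference only -- not part of XNNPACK. */
    #include <stddef.h>

    static void dwconv2d_chw_5x5p2_ref(size_t height, size_t width,
                                       const float* input, const float* weights,
                                       float* output, float out_min, float out_max) {
      for (size_t oy = 0; oy < height; oy++) {
        for (size_t ox = 0; ox < width; ox++) {
          float acc = weights[0];                       /* bias */
          for (int ky = 0; ky < 5; ky++) {
            for (int kx = 0; kx < 5; kx++) {
              const ptrdiff_t iy = (ptrdiff_t) oy + ky - 2;   /* padding_top == 2 */
              const ptrdiff_t ix = (ptrdiff_t) ox + kx - 2;
              if (iy >= 0 && iy < (ptrdiff_t) height &&
                  ix >= 0 && ix < (ptrdiff_t) width) {
                acc += input[(size_t) iy * width + (size_t) ix] * weights[1 + ky * 5 + kx];
              }
            }
          }
          acc = acc < out_min ? out_min : acc;
          acc = acc > out_max ? out_max : acc;
          output[oy * width + ox] = acc;
        }
      }
    }
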
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc2.c
new file mode 100644
index 0000000..58986d0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc2.c
@@ -0,0 +1,526 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc3.c
new file mode 100644
index 0000000..549e2d9
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4-acc3.c
@@ -0,0 +1,532 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4.c
new file mode 100644
index 0000000..aad8b17
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-2x4.c
@@ -0,0 +1,520 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4-acc2.c
new file mode 100644
index 0000000..8f6137b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4-acc2.c
@@ -0,0 +1,649 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
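+      // Store the remaining 1..4 pixels of each output row; the low bits of w select 4-, 2-, and 1-element stores.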
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4.c
new file mode 100644
index 0000000..e4ea3e8
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-3x4.c
@@ -0,0 +1,640 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
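+  // weights[0] seeds the accumulators below (the bias); weights[1..25] hold the 25 taps of the 5x5 kernel.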
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
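+    // Main loop: compute 4 output pixels per row per iteration while more than 8 input columns remain.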
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
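+    // Process the final block of 1..4 pixels; vi*x4567 is masked to zero out-of-bounds columns.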
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
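+    // doz() is a saturating subtraction ("difference or zero"): this iteration produced up to 3 output rows.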
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4-acc2.c
new file mode 100644
index 0000000..fde4f92
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4-acc2.c
@@ -0,0 +1,772 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
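+      // acc2 variant: a second partial accumulator per output row shortens the add dependency chain; the partial sums are combined before clamping.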
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Process the last block of 5..8 pixels, if more than 4 columns remain.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
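+    // Final block of 1..4 pixels: the row tail already sits in vi*x4567; mask off the columns past the end of the row.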
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
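+      // Store the remaining 1..4 outputs: a full vector when exactly 4 are left, otherwise 2 and/or 1 lanes.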
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
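+    // Advance the input row pointers by four rows; input_decrement rewinds the columns consumed above.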
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
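+    // o3 has advanced to the start of the next output row, so the next tile's rows follow from it.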
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4.c
new file mode 100644
index 0000000..39254f9
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-4x4.c
@@ -0,0 +1,760 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
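+  // A finished row has advanced its pointer by input_width rounded up to 4 floats; input_decrement undoes that.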
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
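+  // With padding_top == 2, the first two kernel rows read from the caller-provided zero buffer.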
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
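+    // When fewer than 4 output rows remain, read the missing input rows from zeros and alias the spare output pointers.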
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+
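+    // The columns to the left of each row start out as zero padding.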
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
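+    // Main loop: 4 output rows x 4 output columns per iteration while more than 8 input columns remain.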
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
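+      // Each accumulator starts from the bias held in lane 0 of vw0123.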
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
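+      // Kernel column 2 taps (window offset 0) across all five kernel rows.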
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
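+      // Kernel column 1 taps (window offset -1).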
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
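+      // Kernel column 0 taps (window offset -2); the current vectors also roll into the next iteration's left columns.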
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
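+      // Kernel column 3 taps (window offset +1).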
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-5x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-5x4.c
new file mode 100644
index 0000000..1fc2adf
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-arm-splat-5x4.c
@@ -0,0 +1,880 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+    v128_t vi8x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+    v128_t vi8x4567 = wasm_v128_load(i8); i8 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+      const v128_t vi8x89AB = wasm_v128_load(i8); i8 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+      vi8x0123 = vi8x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
+      vi8x4567 = vi8x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+      v128_t vi8x89AB = wasm_v128_load(i8); i8 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+      vi8x89AB = wasm_v128_and(vmask, vi8x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+      vi8x0123 = vi8x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
+      vi8x4567 = vi8x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+      vi8x4567 = wasm_v128_and(vmask, vi8x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
+      v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+      vo3 = wasm_f32x4_min(vo3, vmax);
+      vo4 = wasm_f32x4_min(vo4, vmax);
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o4, vo4); o4 += 4;
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
new file mode 100644
index 0000000..fb1e86e
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
@@ -0,0 +1,427 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
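Note on the clamping step used throughout the wasmsimd-x86 kernels in this patch: each output vector is forced into [min, max] with wasm_v128_bitselect plus f32x4 comparisons rather than wasm_f32x4_min/max (presumably because the comparison/bitselect sequence lowers better on x86 targets). The following is a minimal standalone sketch of that step for reference only; it is not part of the generated sources and assumes nothing beyond <wasm_simd128.h>.

#include <wasm_simd128.h>

// Clamp four packed floats to [vmin, vmax] the same way the generated
// wasmsimd-x86 kernels do: bitselect through comparison masks.
static inline v128_t clamp_f32x4(v128_t v, v128_t vmin, v128_t vmax) {
  // Where v < vmin, take vmin; elsewhere keep v.
  v = wasm_v128_bitselect(vmin, v, wasm_f32x4_lt(v, vmin));
  // Where v <= vmax, keep v; elsewhere take vmax.
  v = wasm_v128_bitselect(v, vmax, wasm_f32x4_le(v, vmax));
  return v;
}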
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
new file mode 100644
index 0000000..c000415
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
@@ -0,0 +1,430 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
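Note on the shifted input windows built in these 5x5 kernels: each output block needs five views of the input row, shifted by -2..+2 columns around the current 4-pixel block, and the generated code assembles them from the previous, current, and next 4-float blocks with wasm_v32x4_shuffle (lane indices 0-3 select from the first operand, 4-7 from the second). A standalone sketch of that construction follows; the helper name and signature are illustrative only and do not appear in the sources.

#include <wasm_simd128.h>

// Build the four shifted windows around the center block x4567 from the
// adjacent blocks x0123 and x89AB, mirroring the shuffles in the kernels.
static inline void shift_windows(v128_t x0123, v128_t x4567, v128_t x89AB,
                                 v128_t* x2345, v128_t* x3456,
                                 v128_t* x5678, v128_t* x6789) {
  *x2345 = wasm_v32x4_shuffle(x0123, x4567, 2, 3, 4, 5);  // shifted left by 2 columns
  *x3456 = wasm_v32x4_shuffle(x0123, x4567, 3, 4, 5, 6);  // shifted left by 1 column
  *x5678 = wasm_v32x4_shuffle(x4567, x89AB, 1, 2, 3, 4);  // shifted right by 1 column
  *x6789 = wasm_v32x4_shuffle(x4567, x89AB, 2, 3, 4, 5);  // shifted right by 2 columns
}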
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
new file mode 100644
index 0000000..80b11a1
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
@@ -0,0 +1,433 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
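Note on the remainder store at the end of each output row: the last column block may hold only 1-4 valid outputs, so the kernels store the clamped vector piecewise, using a full 128-bit store when four lanes remain, otherwise a 64-bit store of the low pair via wasm_f64x2_extract_lane followed by a lane rotation and, if needed, a single-float store. The sketch below mirrors that logic for reference; as an assumption it takes a plain element count n (1..4) instead of the byte-width bit tests on w used in the generated code.

#include <stddef.h>
#include <wasm_simd128.h>

// Store the first n (1..4) floats of v to o and return the advanced pointer.
static inline float* store_partial_f32x4(float* o, v128_t v, size_t n) {
  if (n == 4) {
    wasm_v128_store(o, v); o += 4;
  } else {
    if (n & 2) {
      // Write the two low lanes as one 64-bit chunk, then rotate the
      // remaining lanes down into position.
      *((double*) o) = wasm_f64x2_extract_lane(v, 0); o += 2;
      v = wasm_v32x4_shuffle(v, v, 2, 3, 0, 1);
    }
    if (n & 1) {
      *o = wasm_f32x4_extract_lane(v, 0); o += 1;
    }
  }
  return o;
}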
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c
new file mode 100644
index 0000000..af27604
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c
@@ -0,0 +1,436 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
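Note on the clamping step used throughout these x86-flavoured wasmsimd kernels: the min/max bounds are applied with `wasm_v128_bitselect` driven by `wasm_f32x4_lt`/`wasm_f32x4_le` comparisons rather than with `f32x4.min`/`f32x4.max`. A minimal self-contained sketch of that same pattern is below; the helper name `clamp_x86_style` is illustrative only and is not part of this patch.

#include <wasm_simd128.h>

// Illustrative sketch: clamp each lane of x to [lo, hi] using the same
// bitselect/compare sequence as the kernels above.
static inline v128_t clamp_x86_style(v128_t x, v128_t lo, v128_t hi) {
  // Lanes where x < lo take lo; all other lanes keep x.
  v128_t y = wasm_v128_bitselect(lo, x, wasm_f32x4_lt(x, lo));
  // Lanes where y <= hi keep y; all other lanes take hi.
  y = wasm_v128_bitselect(y, hi, wasm_f32x4_le(y, hi));
  return y;
}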
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4.c
new file mode 100644
index 0000000..3841ff0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-1x4.c
@@ -0,0 +1,424 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
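As a plain-C cross-check of what each of these 5x5p2 CHW kernels computes per output pixel, the scalar sketch below applies the same bias plus 25-tap window with padding 2 and the same min/max clamp. It is a reference illustration only: the function name is made up, widths here are in elements (the kernels above take input_width in bytes, hence the `% sizeof(float)` assert), and the weights layout comment reflects the packed order visible above (bias first, then k00..k44).

#include <stddef.h>

// Reference-only scalar 5x5, padding-2, stride-1 depthwise convolution over
// one CHW channel. Packed weights layout as in the kernels above:
// weights[0] = bias, weights[1 + 5*ky + kx] = k(ky, kx).
static void dwconv2d_5x5p2_ref(
    size_t height, size_t width,
    const float* input,    // height x width elements
    const float* weights,  // 1 bias + 25 taps
    float* output,         // height x width elements
    float out_min, float out_max)
{
  for (size_t oy = 0; oy < height; oy++) {
    for (size_t ox = 0; ox < width; ox++) {
      float acc = weights[0];
      for (int ky = 0; ky < 5; ky++) {
        for (int kx = 0; kx < 5; kx++) {
          const int iy = (int) oy + ky - 2;
          const int ix = (int) ox + kx - 2;
          // Out-of-bounds taps read implicit zero padding.
          if (iy >= 0 && iy < (int) height && ix >= 0 && ix < (int) width) {
            acc += input[(size_t) iy * width + (size_t) ix] * weights[1 + 5 * ky + kx];
          }
        }
      }
      // Same [min, max] clamp as the wasmsimd kernels, applied per element.
      if (acc < out_min) acc = out_min;
      if (acc > out_max) acc = out_max;
      output[oy * width + ox] = acc;
    }
  }
}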
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
new file mode 100644
index 0000000..34e6412
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
@@ -0,0 +1,552 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
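The `_2x4_acc2` suffix on the kernel above means two output rows per iteration and two partial accumulators per row (`vo*p0`, `vo*p1`) that are only summed just before clamping; splitting the 25 multiply-adds across independent accumulators shortens the add dependency chain. A small scalar illustration of the same accumulator-splitting idea follows; the function is illustrative only and not part of this patch.

// Illustrative sketch: accumulate 25 products through two independent
// partial sums, combined once at the end, mirroring the
// vo0p0 = add(vo0p0, vo0p1) step before clamping in the acc2 kernels.
static float dot25_acc2(const float* x, const float* w) {
  float acc0 = 0.0f;
  float acc1 = 0.0f;
  for (int i = 0; i < 25; i += 2) {
    acc0 += x[i] * w[i];
    if (i + 1 < 25) {
      acc1 += x[i + 1] * w[i + 1];
    }
  }
  return acc0 + acc1;
}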
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c
new file mode 100644
index 0000000..1b386f0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c
@@ -0,0 +1,558 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4.c
new file mode 100644
index 0000000..5208cad
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-2x4.c
@@ -0,0 +1,546 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c
new file mode 100644
index 0000000..43b5c91
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c
@@ -0,0 +1,675 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
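+    // Remainder: 1..4 pixels per row; vmask zeroes the out-of-range lanes so they add nothing to the accumulators.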
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
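+      // Store the last 1..4 pixels of each output row: a full 4-lane store when exactly 4 remain, otherwise 2- and 1-lane tail stores.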
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4.c
new file mode 100644
index 0000000..f29077c
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-3x4.c
@@ -0,0 +1,666 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
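+  // weights[0] holds the shared bias and weights[1..25] the 5x5 taps; each tap is splatted across all four lanes below.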
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
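+  // Rows are read in 4-float blocks, so a fully-consumed row pointer has advanced by input_width rounded up to 16 bytes; input_decrement rewinds it for re-reading in the next band.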
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
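+    // Input rows past the end are redirected to the caller-provided zero row, and surplus output pointers alias the previous row so stores stay in bounds.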
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
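+    // Main loop: compute 4 output pixels for each of the 3 output rows per iteration while more than 8 input pixels remain.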
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+
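+      // Clamp to [min, max]: lanes below vmin take vmin, then lanes above vmax take vmax.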
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
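+    // Advance to the next band of 3 output rows; the new top two input rows re-read rows already visited in this band.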
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4-acc2.c
new file mode 100644
index 0000000..d9a8e73
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4-acc2.c
@@ -0,0 +1,798 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4.c
new file mode 100644
index 0000000..76545de
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-4x4.c
@@ -0,0 +1,786 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
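
Note (illustrative, not part of the patch): the kernel above, and the 5x4 variant that follows, all clamp each accumulator with the same two-step `wasm_v128_bitselect` idiom before storing. The sketch below isolates that idiom under the assumption of a standalone helper named `clamp_f32x4` (a name not used in the patch); it only restates what the generated code already does, lane-wise min then max.

/* Minimal sketch of the wasmsimd_x86 output-clamping idiom.
 * Illustrative only; clamp_f32x4 is a hypothetical helper, not part of XNNPACK. */
#include <wasm_simd128.h>

static inline v128_t clamp_f32x4(v128_t vacc, v128_t vmin, v128_t vmax) {
  /* Where vacc < vmin, the mask lanes are all-ones and bitselect takes vmin;
   * elsewhere it keeps vacc. */
  v128_t vout = wasm_v128_bitselect(vmin, vacc, wasm_f32x4_lt(vacc, vmin));
  /* Where vout <= vmax, keep vout; in the remaining (overflow) lanes take vmax. */
  vout = wasm_v128_bitselect(vout, vmax, wasm_f32x4_le(vout, vmax));
  return vout;
}

This is exactly the pattern `vo0 = bitselect(vmin, vo0p0, lt(vo0p0, vmin)); vo0 = bitselect(vo0, vmax, le(vo0, vmax));` repeated per output row in the kernels of this file and the next.
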
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-5x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-5x4.c
new file mode 100644
index 0000000..b4f47f7
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-loadsplat-5x4.c
@@ -0,0 +1,906 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+  const v128_t vk00 =  wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
+  const v128_t vk01 =  wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
+  const v128_t vk02 =  wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
+  const v128_t vk03 =  wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
+  const v128_t vk04 =  wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
+  const v128_t vk10 =  wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
+  const v128_t vk11 =  wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
+  const v128_t vk12 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
+  const v128_t vk13 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
+  const v128_t vk14 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
+  const v128_t vk20 =  wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
+  const v128_t vk21 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
+  const v128_t vk22 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
+  const v128_t vk23 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
+  const v128_t vk24 =  wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
+  const v128_t vk30 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
+  const v128_t vk31 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
+  const v128_t vk32 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
+  const v128_t vk33 =  wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
+  const v128_t vk34 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
+  const v128_t vk40 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
+  const v128_t vk41 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
+  const v128_t vk42 =  wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
+  const v128_t vk43 =  wasm_v32x4_shuffle(vwOP,   vwOP,   0, 0, 0, 0);
+  const v128_t vk44 =  wasm_v32x4_shuffle(vwOP,   vwOP,   1, 1, 1, 1);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+    v128_t vi8x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+    v128_t vi8x4567 = wasm_v128_load(i8); i8 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+      v128_t vo4p0 = vbias;
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+      const v128_t vi8x89AB = wasm_v128_load(i8); i8 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+      vi8x0123 = vi8x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
+      vi8x4567 = vi8x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+      v128_t vo4p0 = vbias;
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+      v128_t vi8x89AB = wasm_v128_load(i8); i8 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+      vi8x89AB = wasm_v128_and(vmask, vi8x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+      vi8x0123 = vi8x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, vk40));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
+      vi8x4567 = vi8x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, vk44));
+
+
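+      // Clamp the accumulators to [min, max]: bitselect picks vmin wherever the
+      // accumulator is below it, then vmax wherever the result still exceeds it.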
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+      v128_t vo3p0 = vbias;
+      v128_t vo4p0 = vbias;
+
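+      // Zero the lanes beyond the valid row width so they add nothing to the sums.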
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+      vi8x4567 = wasm_v128_and(vmask, vi8x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, vk42));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, vk41));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, vk40));
+
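+      // No input remains to the right of this block, so the shifted 5678/6789 windows are padded with vzero.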
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
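+      // Store the remaining 1-4 output pixels per row: a full vector store when 4 are left,
+      // otherwise a 2-element store followed by a 1-element store as needed.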
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o4, vo4); o4 += 4;
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
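+    // Advance the input and output row pointers to the next block of 5 output rows.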
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc2.c
new file mode 100644
index 0000000..871c876
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc2.c
@@ -0,0 +1,401 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
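+  // weights[0] is the bias; weights[1..25] are the 25 taps of the 5x5 kernel,
+  // loaded as six 4-lane vectors plus a 2-element splat (vwOP).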
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
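+      // This acc2 variant splits the 25 multiply-adds across two accumulators
+      // (vo0p0 and vo0p1), which are summed before clamping.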
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc3.c
new file mode 100644
index 0000000..206a351
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc3.c
@@ -0,0 +1,404 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc4.c
new file mode 100644
index 0000000..45a2061
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc4.c
@@ -0,0 +1,407 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc5.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc5.c
new file mode 100644
index 0000000..3830895
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4-acc5.c
@@ -0,0 +1,410 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4.c
new file mode 100644
index 0000000..a5cca1c
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-1x4.c
@@ -0,0 +1,398 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+  float* o0 = output;
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  } while (--output_height != 0);
+}
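
For reference, a minimal scalar sketch of what the 5x5p2 CHW kernels in this patch compute for a single channel. The function name and the element-count (rather than byte-count) width parameter are illustrative only; the weight layout (weights[0] as bias, then the 25 taps in row-major order) and the implicit zero padding of 2 on every side are inferred from the splat lane indices and the zero-initialized edge vectors in the code above, so treat this as a sketch, not as the library's API.

#include <stddef.h>

// Illustrative reference, not part of this patch: one channel of a 5x5
// depthwise convolution with stride 1, padding 2, and min/max clamping.
// Assumes weights[0] is the bias and weights[1 + 5*ky + kx] is the tap
// for kernel row ky, column kx.
static void dwconv2d_chw_5x5p2_reference(
    size_t height, size_t width,        // width in elements, not bytes
    const float* input,                 // height * width samples, one channel
    const float* weights,               // 26 floats: bias + 5x5 taps
    float* output,                      // height * width samples
    float output_min, float output_max)
{
  for (size_t oy = 0; oy < height; oy++) {
    for (size_t ox = 0; ox < width; ox++) {
      float acc = weights[0];
      for (size_t ky = 0; ky < 5; ky++) {
        for (size_t kx = 0; kx < 5; kx++) {
          const ptrdiff_t iy = (ptrdiff_t) (oy + ky) - 2;
          const ptrdiff_t ix = (ptrdiff_t) (ox + kx) - 2;
          // Samples outside the image contribute zero (implicit padding).
          if (iy >= 0 && iy < (ptrdiff_t) height && ix >= 0 && ix < (ptrdiff_t) width) {
            acc += input[(size_t) iy * width + (size_t) ix] * weights[1 + ky * 5 + kx];
          }
        }
      }
      acc = acc < output_min ? output_min : acc;  // like the vmin bitselect
      acc = acc > output_max ? output_max : acc;  // like the vmax bitselect
      output[oy * width + ox] = acc;
    }
  }
}
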
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc2.c
new file mode 100644
index 0000000..90b69e5
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc2.c
@@ -0,0 +1,526 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
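
In the file above, the 2x4 prefix means two output rows of four pixels are produced per pass (stores to o0 and o1, output_height reduced by 2 via doz), and the acc2 suffix means each row's sum is split across two partial accumulators (vo*p0 and vo*p1) that are added once at the end; the acc3 variant that follows uses three. A hypothetical scalar analogue of the split-accumulator idea, assuming the same bias-plus-25-taps weight layout as above, so that successive adds need not all chain through one register:

#include <stddef.h>

// Hypothetical illustration of the "accN" idea for N = 2: split one 5x5
// dot product across two partial sums and combine them at the end.
static float dot25_acc2(const float x[25], const float w[26]) {
  float acc[2] = { w[0], 0.0f };  // acc[0] starts from the bias w[0]
  for (size_t k = 0; k < 25; k++) {
    acc[k % 2] += x[k] * w[k + 1];
  }
  return acc[0] + acc[1];
}
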
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc3.c
new file mode 100644
index 0000000..e8b8145
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4-acc3.c
@@ -0,0 +1,532 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
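+    // Remainder of 1..4 pixels: out-of-range lanes of the row registers are zeroed with vmask, and the partial store below writes only the valid lanes.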
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
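+    // Advance the input row pointers by two rows (i0/i1 rebased from i2/i3 minus the aligned input_decrement) and move the output pointers to the next pair of rows.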
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4.c
new file mode 100644
index 0000000..d61b436
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-2x4.c
@@ -0,0 +1,520 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
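+  // With padding_top == 2, the two rows above the first input row are read from the caller-provided zero buffer, so i0 and i1 start at zero.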
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+
+    size_t w = input_width;
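+    // Main loop: 2 output rows x 4 pixels per iteration while more than 8 pixels remain; the trailing 5..8-pixel block below masks its extra loads with vmask.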
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
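+    // Final 1..4-pixel block: vmask zeroes lanes past the end of the row before they enter the accumulators; the tail store writes 4, 2, or 1 lanes depending on w.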
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
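+    // Slide the row window down by two output rows: i0/i1 restart from i2/i3 minus input_decrement, the remaining rows follow at input_width strides, and o0/o1 advance past the two rows just written.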
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+
+    output_height = doz(output_height, 2);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4-acc2.c
new file mode 100644
index 0000000..17b7fa4
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4-acc2.c
@@ -0,0 +1,649 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
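+    // Main loop: 3 output rows x 4 pixels per iteration, with each row's dot products split across two accumulators (voNp0/voNp1) that are summed before clamping.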
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
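+    // Final 1..4 columns: the leftover vi*x4567 vectors are masked in place,
+    // the shifted vi*x5678/vi*x6789 vectors are padded with zeros from vzero,
+    // and the clamped results are stored as 4-, 2-, and 1-element tails.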
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4.c
new file mode 100644
index 0000000..856d23b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-3x4.c
@@ -0,0 +1,640 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
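+  // Packed weight layout: weights[0] is the bias, weights[1..25] are the 5x5
+  // filter taps in row-major order. The taps are loaded four at a time below;
+  // vwOP broadcasts the last two (weights[24], weights[25]) into both 64-bit lanes.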
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+
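+  // Row tiling: every pass of the outer loop below produces 3 output rows
+  // (o0..o2). With a 5x5 kernel and padding_top == 2, those rows together
+  // read the 7 consecutive input rows i0..i6; rows past the bottom edge are
+  // redirected to the zero buffer.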
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+
+    size_t w = input_width;
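+    // Column tiling: the main loop emits 4 output pixels per row while more
+    // than 8 input columns remain; the leftover 5..8 and 1..4 column tails
+    // are handled by the masked blocks that follow.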
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
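+      // Columns 5..8: lanes past the end of the row are taken from the zero vector.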
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
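+      // Clamp the accumulated results to the output [min, max] range.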
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
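+    // Advance the input and output pointers to the next group of three output rows.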
+    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+
+    output_height = doz(output_height, 3);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4-acc2.c
new file mode 100644
index 0000000..93aa817
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4-acc2.c
@@ -0,0 +1,772 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
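+  // 4 output rows are computed per pass; the 5-tap vertical window therefore reads 8 input rows,
+  // with the first two supplied by the zero (padding) row.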
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
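+    // For short tails, read the missing input rows from the zero buffer and alias the unused output rows.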
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
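+      // acc2 variant: vo*p1 starts a second accumulator chain that is folded back into vo*p0 at the end.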
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
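+    // Process the final block of 1..4 pixels.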
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4.c
new file mode 100644
index 0000000..6c45903
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-4x4.c
@@ -0,0 +1,760 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+
+    o0 = o3;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+
+    output_height = doz(output_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-5x4.c b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-5x4.c
new file mode 100644
index 0000000..6501b74
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5p2-minmax-wasmsimd-x86-splat-5x4.c
@@ -0,0 +1,880 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top == 2);
+
+  const v128_t vmask = wasm_v128_load(params->scalar.mask);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = zero;
+  const float* i2 = input;
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + input_width);
+  float* o2 = (float*) ((uintptr_t) o1 + input_width);
+  float* o3 = (float*) ((uintptr_t) o2 + input_width);
+  float* o4 = (float*) ((uintptr_t) o3 + input_width);
+
+  size_t output_height = input_height;
+  do {
+    if XNN_UNPREDICTABLE(output_height < 2) {
+      i3 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(output_height < 3) {
+      i4 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(output_height < 4) {
+      i5 = zero;
+      o3 = o2;
+    }
+    if XNN_UNPREDICTABLE(output_height < 5) {
+      i6 = zero;
+      o4 = o3;
+    }
+    if XNN_UNPREDICTABLE(output_height < 6) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(output_height < 7) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0123 = vzero;
+    v128_t vi1x0123 = vzero;
+    v128_t vi2x0123 = vzero;
+    v128_t vi3x0123 = vzero;
+    v128_t vi4x0123 = vzero;
+    v128_t vi5x0123 = vzero;
+    v128_t vi6x0123 = vzero;
+    v128_t vi7x0123 = vzero;
+    v128_t vi8x0123 = vzero;
+
+    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
+    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
+    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
+    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
+    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
+    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
+    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
+    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
+    v128_t vi8x4567 = wasm_v128_load(i8); i8 += 4;
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+      const v128_t vi8x89AB = wasm_v128_load(i8); i8 += 4;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+      vi8x0123 = vi8x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
+      vi8x4567 = vi8x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Always process the last block of 5..8 pixels.
+    if XNN_LIKELY(w > 4 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
+      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
+      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
+      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
+      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
+      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
+      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
+      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
+      v128_t vi8x89AB = wasm_v128_load(i8); i8 += 4;
+
+      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
+      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
+      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
+      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
+      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
+      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
+      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
+      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
+      vi8x89AB = wasm_v128_and(vmask, vi8x89AB);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      vi0x0123 = vi0x4567;
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      vi1x0123 = vi1x4567;
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      vi2x0123 = vi2x4567;
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      vi3x0123 = vi3x4567;
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      vi4x0123 = vi4x4567;
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      vi5x0123 = vi5x4567;
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      vi6x0123 = vi6x4567;
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      vi7x0123 = vi7x4567;
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+      vi8x0123 = vi8x4567;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
+      vi0x4567 = vi0x89AB;
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
+      vi1x4567 = vi1x89AB;
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
+      vi2x4567 = vi2x89AB;
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
+      vi3x4567 = vi3x89AB;
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
+      vi4x4567 = vi4x89AB;
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
+      vi5x4567 = vi5x89AB;
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
+      vi6x4567 = vi6x89AB;
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
+      vi7x4567 = vi7x89AB;
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
+      vi8x4567 = vi8x89AB;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      wasm_v128_store(o4, vo4); o4 += 4;
+      wasm_v128_store(o3, vo3); o3 += 4;
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+
+      w -= 4 * sizeof(float);
+    }
+    assert(w >= 1 * sizeof(float));
+    assert(w <= 4 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
+      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
+      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
+      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
+      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
+      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
+      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
+      vi7x4567 = wasm_v128_and(vmask, vi7x4567);
+      vi8x4567 = wasm_v128_and(vmask, vi8x4567);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
+      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
+      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
+      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
+      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
+      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
+      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
+      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
+      const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
+      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
+      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
+      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
+      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
+      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
+      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
+      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
+      const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
+      const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);
+      const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
+      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
+      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));
+
+      if XNN_LIKELY(w & (4 * sizeof(float))) {
+        wasm_v128_store(o4, vo4); o4 += 4;
+        wasm_v128_store(o3, vo3); o3 += 4;
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w & (2 * sizeof(float))) {
+          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
+          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
+          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
+        }
+        if (w & (1 * sizeof(float))) {
+          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
+          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i1 + input_width);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o4;
+    o1 = (float*) ((uintptr_t) o0 + input_width);
+    o2 = (float*) ((uintptr_t) o1 + input_width);
+    o3 = (float*) ((uintptr_t) o2 + input_width);
+    o4 = (float*) ((uintptr_t) o3 + input_width);
+
+    output_height = doz(output_height, 5);
+  } while (output_height != 0);
+}
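Note (editor sketch, not part of the generated sources): the kernel added above clamps its accumulators with comparison-plus-bitselect, while the deleted "-arm" variants below use wasm_f32x4_max/min directly. The snippet that follows is a minimal standalone illustration of the two styles as they appear in these files; the presumption that the bitselect form is preferred for x86 engines (where it lowers to SSE-style blends) is an inference from the file naming, not a statement from the diff.

// Standalone sketch of the two output-clamping styles used by these
// wasmsimd dwconv2d micro-kernels. Both return v clamped to [vmin, vmax]
// for ordinary (non-NaN) inputs.
#include <wasm_simd128.h>

static v128_t clamp_minmax_style(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_f32x4_max(v, vmin);  // raise values below the lower bound
  v = wasm_f32x4_min(v, vmax);  // lower values above the upper bound
  return v;
}

static v128_t clamp_bitselect_style(v128_t v, v128_t vmin, v128_t vmax) {
  // Where v < vmin, take vmin; otherwise keep v.
  v = wasm_v128_bitselect(vmin, v, wasm_f32x4_lt(v, vmin));
  // Where v <= vmax, keep v; otherwise take vmax.
  v = wasm_v128_bitselect(v, vmax, wasm_f32x4_le(v, vmax));
  return v;
}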
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-arm.c b/src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-arm.c
deleted file mode 100644
index a6450a3..0000000
--- a/src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-arm.c
+++ /dev/null
@@ -1,682 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-3x4.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float *zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 2);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
-  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
-  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
-  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
-  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
-  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
-  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
-  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
-  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
-  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
-  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
-  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
-  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
-  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = zero;
-  const float* i2 = input;
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i3 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height <= 2) {
-      i4 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i5 = zero;
-    }
-    if XNN_UNPREDICTABLE(output_height <= 4) {
-      i6 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-    v128_t vi6x0123 = vzero;
-    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
-    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
-
-    size_t w = input_width;
-    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
-      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      vo0 = wasm_f32x4_max(vo0, vmin);
-      vo1 = wasm_f32x4_max(vo1, vmin);
-      vo2 = wasm_f32x4_max(vo2, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-
-      wasm_v128_store(o2, vo2); o2 += 4;
-      wasm_v128_store(o1, vo1); o1 += 4;
-      wasm_v128_store(o0, vo0); o0 += 4;
-    }
-    // Always process the last block of 5..8 pixels.
-    if XNN_LIKELY(w > 4 * sizeof(float)) {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
-      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
-      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
-      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
-      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
-      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
-      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
-
-      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
-      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
-      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
-      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
-      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
-      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
-      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      vo0 = wasm_f32x4_max(vo0, vmin);
-      vo1 = wasm_f32x4_max(vo1, vmin);
-      vo2 = wasm_f32x4_max(vo2, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-
-      wasm_v128_store(o2, vo2); o2 += 4;
-      wasm_v128_store(o1, vo1); o1 += 4;
-      wasm_v128_store(o0, vo0); o0 += 4;
-      w -= 4 * sizeof(float);
-    }
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      // This might have already happened if there are more than 4 pixels, but we can't count on it.
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vzero, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      vo0 = wasm_f32x4_max(vo0, vmin);
-      vo1 = wasm_f32x4_max(vo1, vmin);
-      vo2 = wasm_f32x4_max(vo2, vmin);
-
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-
-      if XNN_LIKELY(w & (4 * sizeof(float))) {
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-    i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-    o0 = o2;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-
-    output_height = doz(output_height, 3);
-  } while (output_height != 0);
-}
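Note (editor sketch, not part of the generated sources): each kernel's outer loop ends with output_height = doz(output_height, N), where N is the number of output rows produced per iteration (5 in the new kernel above, 3 in the deleted ones). The helper below is an assumed reading of doz() as a saturating "difference or zero", which is what the do { ... } while (output_height != 0) structure relies on when the final iteration yields fewer than N rows.

// Minimal sketch of the assumed doz() semantics used by the outer loops.
#include <stddef.h>

static inline size_t doz(size_t a, size_t b) {
  // Subtract without wrapping below zero, so the loop counter can hit
  // exactly 0 even when fewer than b rows remain on the last pass.
  return a >= b ? a - b : 0;
}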
diff --git a/src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-x86.c b/src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-x86.c
deleted file mode 100644
index 5e81956..0000000
--- a/src/f32-dwconv2d-chw/gen/5x5p2-wasmsimd-3x4-x86.c
+++ /dev/null
@@ -1,682 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-3x4.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float *zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top == 2);
-
-  const v128_t vmask = wasm_v128_load(params->scalar.mask);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
-  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
-  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
-  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
-  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
-  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
-  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
-  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
-  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
-  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
-  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
-  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
-  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
-  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
-
-  const float* i0 = zero;
-  const float* i1 = zero;
-  const float* i2 = input;
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
-  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-  float* o0 = output;
-  float* o1 = (float*) ((uintptr_t) o0 + input_width);
-  float* o2 = (float*) ((uintptr_t) o1 + input_width);
-
-  size_t output_height = input_height;
-  do {
-    if XNN_UNPREDICTABLE(output_height < 2) {
-      i3 = zero;
-      o1 = o0;
-    }
-    if XNN_UNPREDICTABLE(output_height <= 2) {
-      i4 = zero;
-      o2 = o1;
-    }
-    if XNN_UNPREDICTABLE(output_height < 4) {
-      i5 = zero;
-    }
-    if XNN_UNPREDICTABLE(output_height <= 4) {
-      i6 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi5x0123 = vzero;
-    v128_t vi6x0123 = vzero;
-    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
-    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
-    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
-
-    size_t w = input_width;
-    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
-      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
-      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
-      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
-      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
-      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
-      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      vo0 = wasm_v128_bitselect(vmin, vo0, wasm_f32x4_lt(vo0, vmin));
-      vo1 = wasm_v128_bitselect(vmin, vo1, wasm_f32x4_lt(vo1, vmin));
-      vo2 = wasm_v128_bitselect(vmin, vo2, wasm_f32x4_lt(vo2, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-
-      wasm_v128_store(o2, vo2); o2 += 4;
-      wasm_v128_store(o1, vo1); o1 += 4;
-      wasm_v128_store(o0, vo0); o0 += 4;
-    }
-    // Always process the last block of 5..8 pixels.
-    if XNN_LIKELY(w > 4 * sizeof(float)) {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
-      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
-      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
-      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
-      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
-      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
-      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
-
-      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
-      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
-      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
-      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
-      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
-      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
-      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      vi0x0123 = vi0x4567;
-      vi1x0123 = vi1x4567;
-      vi2x0123 = vi2x4567;
-      vi3x0123 = vi3x4567;
-      vi4x0123 = vi4x4567;
-      vi5x0123 = vi5x4567;
-      vi6x0123 = vi6x4567;
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      vi0x4567 = vi0x89AB;
-      vi1x4567 = vi1x89AB;
-      vi2x4567 = vi2x89AB;
-      vi3x4567 = vi3x89AB;
-      vi4x4567 = vi4x89AB;
-      vi5x4567 = vi5x89AB;
-      vi6x4567 = vi6x89AB;
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      vo0 = wasm_v128_bitselect(vmin, vo0, wasm_f32x4_lt(vo0, vmin));
-      vo1 = wasm_v128_bitselect(vmin, vo1, wasm_f32x4_lt(vo1, vmin));
-      vo2 = wasm_v128_bitselect(vmin, vo2, wasm_f32x4_lt(vo2, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
-      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
-
-      wasm_v128_store(o2, vo2); o2 += 4;
-      wasm_v128_store(o1, vo1); o1 += 4;
-      wasm_v128_store(o0, vo0); o0 += 4;
-      w -= 4 * sizeof(float);
-    }
-    assert(w >= 1 * sizeof(float));
-    assert(w <= 4 * sizeof(float));
-    {
-      v128_t vo4567p0 = vbias;
-      v128_t vo4567p1 = vbias;
-      v128_t vo4567p2 = vbias;
-
-      // This might have already happened if there are more than 4 pixels, but we can't count on it.
-      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
-      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
-      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
-      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
-      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
-      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
-      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x4567, vk02));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x4567, vk02));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x4567, vk02));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x4567, vk12));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x4567, vk12));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x4567, vk12));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x4567, vk22));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x4567, vk22));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x4567, vk22));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x4567, vk32));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x4567, vk32));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x4567, vk32));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x4567, vk42));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x4567, vk42));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x4567, vk42));
-
-      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
-      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
-      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
-      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
-      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
-      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
-      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x3456, vk01));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x3456, vk01));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x3456, vk01));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x3456, vk11));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x3456, vk11));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x3456, vk11));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x3456, vk21));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x3456, vk21));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x3456, vk21));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x3456, vk31));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x3456, vk31));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x3456, vk31));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x3456, vk41));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x3456, vk41));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x3456, vk41));
-
-      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
-      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
-      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
-      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
-      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
-      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
-      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x2345, vk00));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x2345, vk00));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x2345, vk00));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x2345, vk10));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x2345, vk10));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x2345, vk10));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x2345, vk20));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x2345, vk20));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x2345, vk20));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x2345, vk30));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x2345, vk30));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x2345, vk30));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x2345, vk40));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x2345, vk40));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x2345, vk40));
-
-      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
-      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x5678, vk03));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x5678, vk03));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x5678, vk03));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x5678, vk13));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x5678, vk13));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x5678, vk13));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x5678, vk23));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x5678, vk23));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x5678, vk23));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x5678, vk33));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x5678, vk33));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x5678, vk33));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x5678, vk43));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x5678, vk43));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x5678, vk43));
-
-      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vzero, 2, 3, 4, 5);
-      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vzero, 2, 3, 4, 5);
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi0x6789, vk04));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi1x6789, vk04));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi2x6789, vk04));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi1x6789, vk14));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi2x6789, vk14));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi3x6789, vk14));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi2x6789, vk24));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi3x6789, vk24));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi4x6789, vk24));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi3x6789, vk34));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi4x6789, vk34));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi5x6789, vk34));
-
-      vo4567p0 = wasm_f32x4_add(vo4567p0, wasm_f32x4_mul(vi4x6789, vk44));
-      vo4567p1 = wasm_f32x4_add(vo4567p1, wasm_f32x4_mul(vi5x6789, vk44));
-      vo4567p2 = wasm_f32x4_add(vo4567p2, wasm_f32x4_mul(vi6x6789, vk44));
-
-      v128_t vo0 = vo4567p0;
-      v128_t vo1 = vo4567p1;
-      v128_t vo2 = vo4567p2;
-
-      vo0 = wasm_f32x4_max(vo0, vmin);
-      vo1 = wasm_f32x4_max(vo1, vmin);
-      vo2 = wasm_f32x4_max(vo2, vmin);
-
-      vo0 = wasm_f32x4_min(vo0, vmax);
-      vo1 = wasm_f32x4_min(vo1, vmax);
-      vo2 = wasm_f32x4_min(vo2, vmax);
-
-      if XNN_LIKELY(w & (4 * sizeof(float))) {
-        wasm_v128_store(o2, vo2);
-        o2 += 4;
-        wasm_v128_store(o1, vo1);
-        o1 += 4;
-        wasm_v128_store(o0, vo0);
-        o0 += 4;
-      } else {
-        if (w & (2 * sizeof(float))) {
-          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
-          o2 += 2;
-          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
-          o1 += 2;
-          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
-          o0 += 2;
-
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
-          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
-        }
-        if (w & (1 * sizeof(float))) {
-          *o2 = wasm_f32x4_extract_lane(vo2, 0);
-          o2 += 1;
-          *o1 = wasm_f32x4_extract_lane(vo1, 0);
-          o1 += 1;
-          *o0 = wasm_f32x4_extract_lane(vo0, 0);
-          o0 += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i1 + input_width);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-    i5 = (const float*) ((uintptr_t) i4 + input_width);
-    i6 = (const float*) ((uintptr_t) i5 + input_width);
-
-    o0 = o2;
-    o1 = (float*) ((uintptr_t) o0 + input_width);
-    o2 = (float*) ((uintptr_t) o1 + input_width);
-
-    output_height = doz(output_height, 3);
-  } while (output_height != 0);
-}
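Note (not part of the patch): the removed kernel above clamps its outputs with compare + wasm_v128_bitselect in the unrolled blocks but with wasm_f32x4_max/wasm_f32x4_min in its final block, while the regenerated "-arm-" loadsplat variants added below use the max/min form throughout. A minimal sketch of the two lane-wise clamp idioms, assuming a clang toolchain with wasm_simd128.h and -msimd128 (the helper names are hypothetical, not from the patch):

#include <wasm_simd128.h>

// Older idiom: select vmin where v < vmin, then keep v only where v <= vmax.
static inline v128_t clamp_bitselect(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_v128_bitselect(vmin, v, wasm_f32x4_lt(v, vmin));  // lane-wise max(v, vmin)
  v = wasm_v128_bitselect(v, vmax, wasm_f32x4_le(v, vmax));  // lane-wise min(v, vmax)
  return v;
}

// Idiom used by the "-arm-" variants below: direct f32x4 min/max.
static inline v128_t clamp_minmax(v128_t v, v128_t vmin, v128_t vmax) {
  return wasm_f32x4_min(wasm_f32x4_max(v, vmin), vmax);
}

For non-NaN inputs the two forms agree; the split into separate "-arm-" and "-x86-" generated variants presumably reflects which form lowers better on a given WebAssembly engine.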
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
new file mode 100644
index 0000000..bf2acb4
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
@@ -0,0 +1,376 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
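Note (not part of the patch): the only difference between the acc2/acc3/acc4 variants of this generated file is how many independent partial sums (vo0p0, vo0p1, ...) the 25 multiply-adds of the 5x5 kernel are spread across before a single combine at the end, which shortens the floating-point dependency chain. A rough sketch of the acc2 scheme, with hypothetical names; the generated code above is fully unrolled and seeds the second accumulator with the first tap product instead of zero:

#include <wasm_simd128.h>

// dot25_acc2: accumulate 25 tap products into two independent partial sums.
static inline v128_t dot25_acc2(const v128_t vi[25], const v128_t vk[25], v128_t vbias) {
  v128_t p[2];
  p[0] = vbias;                   // accumulator 0 carries the bias
  p[1] = wasm_f32x4_splat(0.0f);  // accumulator 1 starts empty
  for (int t = 0; t < 25; t++) {
    // Round-robin the taps over the two accumulators.
    p[t & 1] = wasm_f32x4_add(p[t & 1], wasm_f32x4_mul(vi[t], vk[t]));
  }
  return wasm_f32x4_add(p[0], p[1]);  // combine once, at the end
}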
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
new file mode 100644
index 0000000..7263c9d
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
@@ -0,0 +1,378 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
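Note (not part of the patch): these 5x5s2p2 kernels handle the stride of 2 by loading 8 consecutive input columns per row and splitting them into even- and odd-column vectors (the x8ACE/x9BDF pairs above) with one shuffle each, so every 4-lane vector feeds 4 output columns; the tail block then zeroes out-of-range lanes with vmask_even/vmask_odd and stores 4, 2, or 1 outputs via the extract-lane path. A minimal sketch of the deinterleave step, assuming wasm_simd128.h and -msimd128 (the names are hypothetical):

#include <wasm_simd128.h>

typedef struct { v128_t even; v128_t odd; } columns_t;

// Split 8 consecutive floats into even columns (0,2,4,6) and odd columns (1,3,5,7).
static inline columns_t deinterleave8(const float* row) {
  const v128_t lo = wasm_v128_load(row);      // columns 0..3
  const v128_t hi = wasm_v128_load(row + 4);  // columns 4..7
  columns_t c;
  c.even = wasm_v32x4_shuffle(lo, hi, 0, 2, 4, 6);
  c.odd  = wasm_v32x4_shuffle(lo, hi, 1, 3, 5, 7);
  return c;
}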
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
new file mode 100644
index 0000000..01975c0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
@@ -0,0 +1,380 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c
new file mode 100644
index 0000000..a09d0f4
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c
@@ -0,0 +1,382 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4.c
new file mode 100644
index 0000000..65b93b1
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4.c
@@ -0,0 +1,374 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
new file mode 100644
index 0000000..8181576
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
@@ -0,0 +1,502 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c
new file mode 100644
index 0000000..bb2e340
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4-acc3.c
@@ -0,0 +1,506 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi3x8ACE, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi3x8ACE, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4.c
new file mode 100644
index 0000000..6aea751
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-2x4.c
@@ -0,0 +1,498 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
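The generated wasmsimd kernels in the hunks above and below all realize the same operation: a 5x5, stride-2, padding-2 depthwise convolution over a single CHW channel plane, with a bias tap followed by 25 kernel taps and a final min/max clamp. They differ only in how many output rows are produced per pass and how many partial accumulators are kept. As a cross-check of the row/column arithmetic (output_width, output_height, clamping), here is a minimal scalar sketch; it is illustrative only and not part of the diff, it assumes symmetric padding of 2 on every side (the ukernels accept a padding_top of 1 or 2), it measures widths in elements rather than bytes, and the function name dwconv2d_5x5s2p2_ref is invented.

    #include <stddef.h>
    #include <math.h>

    /* Scalar reference: 5x5 kernel, stride 2, padding 2, one channel plane. */
    static void dwconv2d_5x5s2p2_ref(
        size_t input_height, size_t input_width,
        const float* input,     /* input_height x input_width, row-major */
        const float* weights,   /* weights[0] = bias, weights[1..25] = 5x5 taps */
        float* output,          /* ceil(H/2) x ceil(W/2), row-major */
        float output_min, float output_max)
    {
      const size_t output_height = (input_height + 2 * 2 - 5) / 2 + 1;
      const size_t output_width  = (input_width  + 2 * 2 - 5) / 2 + 1;
      for (size_t oy = 0; oy < output_height; oy++) {
        for (size_t ox = 0; ox < output_width; ox++) {
          float acc = weights[0];  /* start from the bias, like vbias above */
          for (size_t ky = 0; ky < 5; ky++) {
            for (size_t kx = 0; kx < 5; kx++) {
              /* Map the output position back to the padded input grid. */
              const ptrdiff_t iy = (ptrdiff_t) (oy * 2 + ky) - 2;
              const ptrdiff_t ix = (ptrdiff_t) (ox * 2 + kx) - 2;
              if (iy >= 0 && iy < (ptrdiff_t) input_height &&
                  ix >= 0 && ix < (ptrdiff_t) input_width) {
                acc += input[(size_t) iy * input_width + (size_t) ix] * weights[1 + ky * 5 + kx];
              }
            }
          }
          /* Same clamping order as the kernels: max(min) first, then min(max). */
          acc = fmaxf(acc, output_min);
          acc = fminf(acc, output_max);
          output[oy * output_width + ox] = acc;
        }
      }
    }

The vectorized kernels reach the same result by de-interleaving each row into even-index (x8ACE) and odd-index (x9BDF) lanes so that one v128_t holds four stride-2 output positions at once; the shuffles into x68AC, x79BD and xACEG recreate the left- and right-shifted views of those lanes that the five kernel columns need.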
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c
new file mode 100644
index 0000000..8d58b27
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4-acc2.c
@@ -0,0 +1,626 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 11) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+    v128_t vi7x0246 = vzero;
+    v128_t vi8x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+    const v128_t vi7x89AB = wasm_v128_load(i7);
+    const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+    i7 += 8;
+    const v128_t vi8x89AB = wasm_v128_load(i8);
+    const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+    i8 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+    v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+    v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+    v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+    v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi4x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      vi7x0246 = vi7x8ACE;
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+      vi8x0246 = vi8x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7xGHIJ = wasm_v128_load(i7);
+      const v128_t vi7xKLMN = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8xGHIJ = wasm_v128_load(i8);
+      const v128_t vi8xKLMN = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+      const v128_t vi7xGIKM = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 0, 2, 4, 6);
+      const v128_t vi7xHJLN = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 1, 3, 5, 7);
+      const v128_t vi8xGIKM = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 0, 2, 4, 6);
+      const v128_t vi8xHJLN = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vi7xGIKM, 1, 2, 3, 4);
+      vi7x8ACE = vi7xGIKM;
+      vi7x9BDF = vi7xHJLN;
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vi8xGIKM, 1, 2, 3, 4);
+      vi8x8ACE = vi8xGIKM;
+      vi8x9BDF = vi8xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+      vi7x8ACE = wasm_v128_and(vmask_even, vi7x8ACE);
+      vi8x8ACE = wasm_v128_and(vmask_even, vi8x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+      vi7x9BDF = wasm_v128_and(vmask_odd, vi7x9BDF);
+      vi8x9BDF = wasm_v128_and(vmask_odd, vi8x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi4x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
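The _3x4_acc2 kernel above and the _3x4 kernel added next compute identical 25-tap sums; the only difference is that the acc2 variant splits each output row's sum across two partial accumulators (vo*p0 and vo*p1) and folds them together once at the end (vo0p0 = wasm_f32x4_add(vo0p0, vo0p1)), which shortens the serial floating-point add dependency chain at the cost of extra registers and can change the result only through rounding order. A minimal scalar sketch of the same idea follows; it is illustrative only, the function name dot_acc2 is invented, and it operates on a plain float array rather than on v128_t lanes.

    #include <stddef.h>

    /* Two-accumulator reduction: even terms feed acc0, odd terms feed acc1,
     * and the two partial sums are merged exactly once at the end. */
    static float dot_acc2(const float* a, const float* b, size_t n) {
      float acc0 = 0.0f;  /* plays the role of vo*p0 in the kernel */
      float acc1 = 0.0f;  /* plays the role of vo*p1 in the kernel */
      size_t i = 0;
      for (; i + 1 < n; i += 2) {
        acc0 += a[i] * b[i];
        acc1 += a[i + 1] * b[i + 1];
      }
      if (i < n) {
        acc0 += a[i] * b[i];  /* leftover term when n is odd */
      }
      return acc0 + acc1;     /* single merge, as in vo0p0 += vo0p1 above */
    }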
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4.c
new file mode 100644
index 0000000..3259954
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-loadsplat-3x4.c
@@ -0,0 +1,620 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 11) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+    v128_t vi7x0246 = vzero;
+    v128_t vi8x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+    const v128_t vi7x89AB = wasm_v128_load(i7);
+    const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+    i7 += 8;
+    const v128_t vi8x89AB = wasm_v128_load(i8);
+    const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+    i8 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+    v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+    v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+    v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+    v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      vi7x0246 = vi7x8ACE;
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+      vi8x0246 = vi8x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7xGHIJ = wasm_v128_load(i7);
+      const v128_t vi7xKLMN = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8xGHIJ = wasm_v128_load(i8);
+      const v128_t vi8xKLMN = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+      const v128_t vi7xGIKM = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 0, 2, 4, 6);
+      const v128_t vi7xHJLN = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 1, 3, 5, 7);
+      const v128_t vi8xGIKM = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 0, 2, 4, 6);
+      const v128_t vi8xHJLN = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vi7xGIKM, 1, 2, 3, 4);
+      vi7x8ACE = vi7xGIKM;
+      vi7x9BDF = vi7xHJLN;
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vi8xGIKM, 1, 2, 3, 4);
+      vi8x8ACE = vi8xGIKM;
+      vi8x9BDF = vi8xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+      vi7x8ACE = wasm_v128_and(vmask_even, vi7x8ACE);
+      vi8x8ACE = wasm_v128_and(vmask_even, vi8x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+      vi7x9BDF = wasm_v128_and(vmask_odd, vi7x9BDF);
+      vi8x9BDF = wasm_v128_and(vmask_odd, vi8x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, vk44));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
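+      // Remaining output pixels in this row group: w_tmp = ceil((w / sizeof(float)) / 2), i.e. 1-4.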
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
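+    // Advance the input rows by 6 (3 output rows x stride 2) and the output rows by 3 for the next group.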
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc2.c
new file mode 100644
index 0000000..670641e
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc2.c
@@ -0,0 +1,356 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
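+  // weights[0] is the per-channel bias (splatted below to seed the accumulator); weights[1..25] are the 5x5 taps.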
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
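+    // Main loop: each iteration consumes 8 input columns and stores 4 stride-2 output pixels.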
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
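+      // Zero out the input columns past the end of the row so they do not affect the remainder outputs.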
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc3.c
new file mode 100644
index 0000000..225ef6c
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc3.c
@@ -0,0 +1,358 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc4.c
new file mode 100644
index 0000000..8471e39
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc4.c
@@ -0,0 +1,360 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
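+      // Reduce the four partial accumulators (kept separate to shorten dependency chains) into vo0p0.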
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc5.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc5.c
new file mode 100644
index 0000000..332ed69
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc5.c
@@ -0,0 +1,362 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4.c
new file mode 100644
index 0000000..d704d92
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-1x4.c
@@ -0,0 +1,354 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc2.c
new file mode 100644
index 0000000..4a169f0
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc2.c
@@ -0,0 +1,482 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc3.c
new file mode 100644
index 0000000..54aaefb
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4-acc3.c
@@ -0,0 +1,486 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4.c
new file mode 100644
index 0000000..8f3c868
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-2x4.c
@@ -0,0 +1,478 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4-acc2.c
new file mode 100644
index 0000000..0e4845f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4-acc2.c
@@ -0,0 +1,606 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
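+  // Rows i0-i8 form the 5-row sliding window (vertical stride 2) for three output rows:
+  // output row 0 reads i0-i4, row 1 reads i2-i6, and row 2 reads i4-i8.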
+
+  const size_t output_width = round_down_po2((input_width + (4 /* padding */ - 5 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 11) {
+      i8 = zero;
+    }
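+    // Rows below the padded input read from the shared zero row, and the corresponding
+    // output pointers collapse onto the previous row; stores are issued in o2, o1, o0
+    // order, so the valid row is always written last.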
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+    v128_t vi7x0246 = vzero;
+    v128_t vi8x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+    const v128_t vi7x89AB = wasm_v128_load(i7);
+    const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+    i7 += 8;
+    const v128_t vi8x89AB = wasm_v128_load(i8);
+    const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+    i8 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+    v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+    v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+    v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+    v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
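+    // Deinterleave each row into even columns (vi*x8ACE, the kernel-center taps of four
+    // consecutive stride-2 outputs) and odd columns (vi*x9BDF, the +1 taps).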
+
+    size_t w = input_width;
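+    // Main loop: consume 8 input columns (4 stride-2 output columns) per row per iteration;
+    // the remaining 1-8 columns are handled by the masked tail below.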
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
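+      // acc2 variant: each output row accumulates into two partial sums (vo*p0, vo*p1)
+      // that are combined at the end of the block to shorten the add dependency chain.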
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      vi7x0246 = vi7x8ACE;
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+      vi8x0246 = vi8x8ACE;
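+      // vi*x68AC: even columns shifted back by one lane (the -2 taps), spliced from the
+      // previous vector's last even column.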
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
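+      // vi*x79BD: odd columns shifted back by one lane (the -1 taps).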
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7xGHIJ = wasm_v128_load(i7);
+      const v128_t vi7xKLMN = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8xGHIJ = wasm_v128_load(i8);
+      const v128_t vi8xKLMN = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+      const v128_t vi7xGIKM = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 0, 2, 4, 6);
+      const v128_t vi7xHJLN = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 1, 3, 5, 7);
+      const v128_t vi8xGIKM = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 0, 2, 4, 6);
+      const v128_t vi8xHJLN = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vi7xGIKM, 1, 2, 3, 4);
+      vi7x8ACE = vi7xGIKM;
+      vi7x9BDF = vi7xHJLN;
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vi8xGIKM, 1, 2, 3, 4);
+      vi8x8ACE = vi8xGIKM;
+      vi8x9BDF = vi8xHJLN;
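+      // vi*xACEG: even columns advanced by one lane (the +2 taps), borrowing the first
+      // even column of the next vector; the rotated registers feed the next iteration.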
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+      vi7x8ACE = wasm_v128_and(vmask_even, vi7x8ACE);
+      vi8x8ACE = wasm_v128_and(vmask_even, vi8x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+      vi7x9BDF = wasm_v128_and(vmask_odd, vi7x9BDF);
+      vi8x9BDF = wasm_v128_and(vmask_odd, vi8x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
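+      // w_tmp is the number of output pixels left to store per row (ceil of the remaining
+      // input pixels / 2): store 4 at once, otherwise 2 and/or 1 via lane extracts.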
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4.c
new file mode 100644
index 0000000..d549bcb
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-arm-splat-3x4.c
@@ -0,0 +1,600 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (4 /* padding */ - 5 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 11) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+    v128_t vi7x0246 = vzero;
+    v128_t vi8x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+    const v128_t vi7x89AB = wasm_v128_load(i7);
+    const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+    i7 += 8;
+    const v128_t vi8x89AB = wasm_v128_load(i8);
+    const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+    i8 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+    v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+    v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+    v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+    v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
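+      // Single-accumulator variant: all 25 taps accumulate directly into vo*p0
+      // (no vo*p1 split as in the acc2 kernel above).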
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      vi7x0246 = vi7x8ACE;
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+      vi8x0246 = vi8x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7xGHIJ = wasm_v128_load(i7);
+      const v128_t vi7xKLMN = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8xGHIJ = wasm_v128_load(i8);
+      const v128_t vi8xKLMN = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+      const v128_t vi7xGIKM = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 0, 2, 4, 6);
+      const v128_t vi7xHJLN = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 1, 3, 5, 7);
+      const v128_t vi8xGIKM = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 0, 2, 4, 6);
+      const v128_t vi8xHJLN = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vi7xGIKM, 1, 2, 3, 4);
+      vi7x8ACE = vi7xGIKM;
+      vi7x9BDF = vi7xHJLN;
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vi8xGIKM, 1, 2, 3, 4);
+      vi8x8ACE = vi8xGIKM;
+      vi8x9BDF = vi8xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+      vi7x8ACE = wasm_v128_and(vmask_even, vi7x8ACE);
+      vi8x8ACE = wasm_v128_and(vmask_even, vi8x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+      vi7x9BDF = wasm_v128_and(vmask_odd, vi7x9BDF);
+      vi8x9BDF = wasm_v128_and(vmask_odd, vi8x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
+      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
+      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
+      vo0 = wasm_f32x4_min(vo0, vmax);
+      vo1 = wasm_f32x4_min(vo1, vmax);
+      vo2 = wasm_f32x4_min(vo2, vmax);
+
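+      // w_tmp is the number of output pixels left in this row (the remaining input
+      // pixels, rounded up, divided by the stride of 2): store 4 at once when
+      // possible, otherwise 2 and/or 1.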
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
new file mode 100644
index 0000000..8a79c5f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
@@ -0,0 +1,376 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
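+  // Per-channel weights: one bias followed by the 25 taps of the 5x5 kernel, each
+  // splatted to all four lanes (every lane processes the same channel, different columns).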
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
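+  // The row loop advances each input pointer in 8-float blocks, so a traversed pointer
+  // ends up input_width rounded up to 8 floats past the start of its row; input_decrement
+  // rewinds it to the row start when the rows are rotated at the bottom of the outer loop.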
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
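+  // Output rows: floor((padded_input_height - kernel_size) / stride) + 1, with the +1
+  // folded in as +stride before the division.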
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
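+    // Deinterleave the first 8 pixels of each row into even (x8ACE) and odd (x9BDF)
+    // columns; the stride-2 convolution consumes them as separate vectors.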
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+
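+      // Splice the last even column of the previous block in front of x8ACE to form
+      // x68AC, the inputs for column 0 of the 5-wide kernel (vk00..vk40).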
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
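+      // Clamp the output to [min, max] with compare + bitselect (the x86-tuned
+      // wasmsimd variant) rather than f32x4 min/max.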
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
new file mode 100644
index 0000000..016c5be
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
@@ -0,0 +1,378 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
new file mode 100644
index 0000000..701c91e
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
@@ -0,0 +1,380 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c
new file mode 100644
index 0000000..4b56731
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c
@@ -0,0 +1,382 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, vk32);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4.c
new file mode 100644
index 0000000..b0ee20b
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4.c
@@ -0,0 +1,374 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
new file mode 100644
index 0000000..37c5755
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
@@ -0,0 +1,502 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (4 /* padding */ - 5 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c
new file mode 100644
index 0000000..603996a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4-acc3.c
@@ -0,0 +1,506 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
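+  // Splat the bias and the 25 taps of the 5x5 kernel into vector registers up front ("loadsplat").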
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
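+  // Seven input rows (i0-i6) feed two stride-2 output rows of the 5-tap kernel;
+  // the zero row stands in for rows that fall into the padding.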
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
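+    // Deinterleave each row's 8 loaded pixels into even-index (x8ACE) and odd-index (x9BDF)
+    // columns, matching the stride-2 access pattern of the kernel taps.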
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
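+    // Main loop: consume 8 input pixels (4 output pixels) per row pair per iteration,
+    // splitting each output row's sum across three accumulators (voNp0-voNp2) to shorten
+    // the floating-point dependency chains.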
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi3x8ACE, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6x9BDF, vk43));
+
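+      // Form the columns two pixels to the left (x68AC) from the previous block's even
+      // columns and the current ones.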
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+
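+      // Form the columns two pixels to the right (xACEG) from the current and next even
+      // columns, then rotate the even/odd registers for the next iteration.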
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
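+      // Clamp the accumulated outputs to [min, max] with compare + bitselect.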
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
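+      // Zero the even/odd lanes that lie past the end of the row so they contribute
+      // nothing to the sums in this final partial block.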
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
+      v128_t vo1p2 = wasm_f32x4_mul(vi3x8ACE, vk12);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
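+      // w_tmp is the number of output pixels left in this row pair; store a full vector of 4,
+      // or spill 2 and/or 1 values via lane extraction.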
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
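+    // Advance two output rows: the next iteration's i0-i2 are this iteration's i4-i6,
+    // rewound by input_decrement to the start of their rows.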
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4.c
new file mode 100644
index 0000000..0349966
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-2x4.c
@@ -0,0 +1,498 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
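+    // Main loop: same structure as the acc3 variant above, but each output row accumulates
+    // into a single register (voNp0).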
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c
new file mode 100644
index 0000000..e6886ce
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4-acc2.c
@@ -0,0 +1,626 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 11) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+    v128_t vi7x0246 = vzero;
+    v128_t vi8x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+    const v128_t vi7x89AB = wasm_v128_load(i7);
+    const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+    i7 += 8;
+    const v128_t vi8x89AB = wasm_v128_load(i8);
+    const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+    i8 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+    v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+    v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+    v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+    v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
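+    // Main loop: three output rows per pass (vo0/vo1/vo2), each split across two accumulators.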
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi4x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      vi7x0246 = vi7x8ACE;
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+      vi8x0246 = vi8x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7xGHIJ = wasm_v128_load(i7);
+      const v128_t vi7xKLMN = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8xGHIJ = wasm_v128_load(i8);
+      const v128_t vi8xKLMN = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+      const v128_t vi7xGIKM = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 0, 2, 4, 6);
+      const v128_t vi7xHJLN = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 1, 3, 5, 7);
+      const v128_t vi8xGIKM = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 0, 2, 4, 6);
+      const v128_t vi8xHJLN = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vi7xGIKM, 1, 2, 3, 4);
+      vi7x8ACE = vi7xGIKM;
+      vi7x9BDF = vi7xHJLN;
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vi8xGIKM, 1, 2, 3, 4);
+      vi8x8ACE = vi8xGIKM;
+      vi8x9BDF = vi8xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+      vi7x8ACE = wasm_v128_and(vmask_even, vi7x8ACE);
+      vi8x8ACE = wasm_v128_and(vmask_even, vi8x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+      vi7x9BDF = wasm_v128_and(vmask_odd, vi7x9BDF);
+      vi8x9BDF = wasm_v128_and(vmask_odd, vi8x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);
+      v128_t vo2p1 = wasm_f32x4_mul(vi4x8ACE, vk02);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk22));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, vk42));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk13));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, vk33));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, vk00));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, vk20));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, vk11));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, vk31));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, vk04));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, vk24));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, vk44));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4.c
new file mode 100644
index 0000000..887865c
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-loadsplat-3x4.c
@@ -0,0 +1,620 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4(
+
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vbias = wasm_v32x4_load_splat(weights);
+  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
+  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
+  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
+  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
+  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
+  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
+  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
+  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
+  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
+  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
+  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
+  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
+  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
+  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
+  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
+  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
+  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
+  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
+  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
+  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
+  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
+  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
+  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
+  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
+  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 11) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+    v128_t vi7x0246 = vzero;
+    v128_t vi8x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+    const v128_t vi7x89AB = wasm_v128_load(i7);
+    const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+    i7 += 8;
+    const v128_t vi8x89AB = wasm_v128_load(i8);
+    const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+    i8 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+    v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+    v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+    v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+    v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      vi7x0246 = vi7x8ACE;
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+      vi8x0246 = vi8x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7xGHIJ = wasm_v128_load(i7);
+      const v128_t vi7xKLMN = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8xGHIJ = wasm_v128_load(i8);
+      const v128_t vi8xKLMN = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+      const v128_t vi7xGIKM = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 0, 2, 4, 6);
+      const v128_t vi7xHJLN = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 1, 3, 5, 7);
+      const v128_t vi8xGIKM = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 0, 2, 4, 6);
+      const v128_t vi8xHJLN = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vi7xGIKM, 1, 2, 3, 4);
+      vi7x8ACE = vi7xGIKM;
+      vi7x9BDF = vi7xHJLN;
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vi8xGIKM, 1, 2, 3, 4);
+      vi8x8ACE = vi8xGIKM;
+      vi8x9BDF = vi8xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = vbias;
+      v128_t vo1p0 = vbias;
+      v128_t vo2p0 = vbias;
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+      vi7x8ACE = wasm_v128_and(vmask_even, vi7x8ACE);
+      vi8x8ACE = wasm_v128_and(vmask_even, vi8x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+      vi7x9BDF = wasm_v128_and(vmask_odd, vi7x9BDF);
+      vi8x9BDF = wasm_v128_and(vmask_odd, vi8x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk02));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk02));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk12));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk22));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, vk32));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x8ACE, vk32));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, vk42));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk03));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk03));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk13));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk23));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk23));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, vk33));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, vk43));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x9BDF, vk43));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, vk00));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, vk10));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x68AC, vk10));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, vk20));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, vk30));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x68AC, vk30));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, vk40));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, vk01));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x79BD, vk01));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, vk11));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, vk21));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x79BD, vk21));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, vk31));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, vk41));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x79BD, vk41));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, vk04));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, vk14));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5xACEG, vk14));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, vk24));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, vk34));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7xACEG, vk34));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, vk44));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc2.c
new file mode 100644
index 0000000..7cc9a3f
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc2.c
@@ -0,0 +1,356 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc3.c
new file mode 100644
index 0000000..aecfad5
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc3.c
@@ -0,0 +1,358 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc4.c
new file mode 100644
index 0000000..ed1ccd2
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc4.c
@@ -0,0 +1,360 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc5.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc5.c
new file mode 100644
index 0000000..7acbecf
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc5.c
@@ -0,0 +1,362 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
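+      // Reduce the five partial accumulators into vo0p0, then clamp the result to [vmin, vmax].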
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
+
+      v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
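+      // w_tmp is the number of output pixels left to store (each pair of remaining
+      // input pixels yields one output); store 4, then 2, then 1 floats as needed.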
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4.c
new file mode 100644
index 0000000..bbd64c5
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-1x4.c
@@ -0,0 +1,354 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+  float* o0 = output;
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+
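+    // De-interleave each input row into even-index (x8ACE) and odd-index (x9BDF)
+    // pixel vectors for the stride-2 convolution.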
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
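+    // Main loop: consume 8 input pixels per row and produce 4 output pixels per iteration.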
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
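+    // Step the row pointers down by two input rows (vertical stride 2);
+    // input_decrement rewinds the horizontal advance made above.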
+    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+
+
+    output_height -= 1;
+    padded_input_height -= 2;
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc2.c
new file mode 100644
index 0000000..ccef599
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc2.c
@@ -0,0 +1,482 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
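+  // This 2x4-acc2 variant reads seven input rows (i0-i6) and writes two output
+  // rows (o0, o1) per iteration, accumulating into two partial sums per output row.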
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc3.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc3.c
new file mode 100644
index 0000000..e478b42
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4-acc3.c
@@ -0,0 +1,486 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
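+  // Same 2x4 structure as the acc2 variant above, but each output row accumulates
+  // into three partial sums (vo*p0, vo*p1, vo*p2).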
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+      v128_t vo1p2 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
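+      // Each output pixel covers two input columns, so ceil(w / (2 floats)) outputs remain in this final block.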
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4.c
new file mode 100644
index 0000000..37c78f7
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-2x4.c
@@ -0,0 +1,478 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+
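+    // Split each row's 8 freshly loaded pixels into even-column (x8ACE) and odd-column (x9BDF) halves for the stride-2 taps.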
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
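+      // Mask off columns past the end of the row so the partial final block contributes zeros there.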
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
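+    // Step down 4 input rows (2 output rows at stride 2); input_decrement rewinds each pointer to the start of its row.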
+    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+
+    o0 = o1;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+
+    output_height = doz(output_height, 2);
+    padded_input_height = doz(padded_input_height, 4);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4-acc2.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4-acc2.c
new file mode 100644
index 0000000..9efcdd7
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4-acc2.c
@@ -0,0 +1,606 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 11) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+    v128_t vi7x0246 = vzero;
+    v128_t vi8x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+    const v128_t vi7x89AB = wasm_v128_load(i7);
+    const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+    i7 += 8;
+    const v128_t vi8x89AB = wasm_v128_load(i8);
+    const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+    i8 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+    v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+    v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+    v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+    v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      vi7x0246 = vi7x8ACE;
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+      vi8x0246 = vi8x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7xGHIJ = wasm_v128_load(i7);
+      const v128_t vi7xKLMN = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8xGHIJ = wasm_v128_load(i8);
+      const v128_t vi8xKLMN = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+      const v128_t vi7xGIKM = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 0, 2, 4, 6);
+      const v128_t vi7xHJLN = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 1, 3, 5, 7);
+      const v128_t vi8xGIKM = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 0, 2, 4, 6);
+      const v128_t vi8xHJLN = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 1, 3, 5, 7);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vi7xGIKM, 1, 2, 3, 4);
+      vi7x8ACE = vi7xGIKM;
+      vi7x9BDF = vi7xHJLN;
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vi8xGIKM, 1, 2, 3, 4);
+      vi8x8ACE = vi8xGIKM;
+      vi8x9BDF = vi8xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+      vi7x8ACE = wasm_v128_and(vmask_even, vi7x8ACE);
+      vi8x8ACE = wasm_v128_and(vmask_even, vi8x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+      vi7x9BDF = wasm_v128_and(vmask_odd, vi7x9BDF);
+      vi8x9BDF = wasm_v128_and(vmask_odd, vi8x9BDF);
+
+      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+      v128_t vo2p1 = wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi8x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi7xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
+      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
+      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4.c
new file mode 100644
index 0000000..af1b14a
--- /dev/null
+++ b/src/f32-dwconv2d-chw/gen/5x5s2p2-minmax-wasmsimd-x86-splat-3x4.c
@@ -0,0 +1,600 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/math.h>
+
+
+
+void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4(
+    size_t input_height,
+    size_t input_width,
+    const float* input,
+    const float* weights,
+    const float* zero,
+    float* output,
+    uint32_t padding_top,
+    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(input_height != 0);
+  assert(input_width != 0);
+  assert(input_width % sizeof(float) == 0);
+  assert(padding_top >= 1);
+  assert(padding_top <= 2);
+
+  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
+  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+
+  const v128_t vw0123 = wasm_v128_load(weights);
+  const v128_t vw4567 = wasm_v128_load(weights + 4);
+  const v128_t vw89AB = wasm_v128_load(weights + 8);
+  const v128_t vwCDEF = wasm_v128_load(weights + 12);
+  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
+  const v128_t vwKLMN = wasm_v128_load(weights + 20);
+  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
+
+  const v128_t vzero = wasm_f32x4_splat(0.0f);
+
+  const uint32_t padding_top_less_1 = padding_top - 1;
+  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
+
+  const float* i0 = zero;
+  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
+  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
+  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
+    i1 = zero;
+  }
+  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
+  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
+  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
+  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
+  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
+  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
+
+  float* o0 = output;
+  float* o1 = (float*) ((uintptr_t) o0 + output_width);
+  float* o2 = (float*) ((uintptr_t) o1 + output_width);
+
+  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
+  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
+  do {
+    if XNN_UNPREDICTABLE(padded_input_height < 6) {
+      i3 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 7) {
+      i4 = zero;
+      o1 = o0;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 8) {
+      i5 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 9) {
+      i6 = zero;
+      o2 = o1;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 10) {
+      i7 = zero;
+    }
+    if XNN_UNPREDICTABLE(padded_input_height < 11) {
+      i8 = zero;
+    }
+
+    v128_t vi0x0246 = vzero;
+    v128_t vi1x0246 = vzero;
+    v128_t vi2x0246 = vzero;
+    v128_t vi3x0246 = vzero;
+    v128_t vi4x0246 = vzero;
+    v128_t vi5x0246 = vzero;
+    v128_t vi6x0246 = vzero;
+    v128_t vi7x0246 = vzero;
+    v128_t vi8x0246 = vzero;
+
+    v128_t vi0x1357 = vzero;
+    v128_t vi1x1357 = vzero;
+    v128_t vi2x1357 = vzero;
+    v128_t vi3x1357 = vzero;
+    v128_t vi4x1357 = vzero;
+    v128_t vi5x1357 = vzero;
+    v128_t vi6x1357 = vzero;
+    v128_t vi7x1357 = vzero;
+    v128_t vi8x1357 = vzero;
+
+    const v128_t vi0x89AB = wasm_v128_load(i0);
+    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
+    i0 += 8;
+    const v128_t vi1x89AB = wasm_v128_load(i1);
+    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
+    i1 += 8;
+    const v128_t vi2x89AB = wasm_v128_load(i2);
+    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
+    i2 += 8;
+    const v128_t vi3x89AB = wasm_v128_load(i3);
+    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
+    i3 += 8;
+    const v128_t vi4x89AB = wasm_v128_load(i4);
+    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
+    i4 += 8;
+    const v128_t vi5x89AB = wasm_v128_load(i5);
+    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
+    i5 += 8;
+    const v128_t vi6x89AB = wasm_v128_load(i6);
+    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
+    i6 += 8;
+    const v128_t vi7x89AB = wasm_v128_load(i7);
+    const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
+    i7 += 8;
+    const v128_t vi8x89AB = wasm_v128_load(i8);
+    const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
+    i8 += 8;
+
+    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
+    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
+    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
+    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
+    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
+    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
+    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
+    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
+    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
+    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
+    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
+    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
+    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
+    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
+    v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
+    v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
+    v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
+    v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);
+
+    size_t w = input_width;
+    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      vi0x0246 = vi0x8ACE;
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      vi1x0246 = vi1x8ACE;
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      vi2x0246 = vi2x8ACE;
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      vi3x0246 = vi3x8ACE;
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      vi4x0246 = vi4x8ACE;
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      vi5x0246 = vi5x8ACE;
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      vi6x0246 = vi6x8ACE;
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      vi7x0246 = vi7x8ACE;
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+      vi8x0246 = vi8x8ACE;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      vi0x1357 = vi0x9BDF;
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      vi1x1357 = vi1x9BDF;
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      vi2x1357 = vi2x9BDF;
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      vi3x1357 = vi3x9BDF;
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      vi4x1357 = vi4x9BDF;
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      vi5x1357 = vi5x9BDF;
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      vi6x1357 = vi6x9BDF;
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      vi7x1357 = vi7x9BDF;
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+      vi8x1357 = vi8x9BDF;
+
+      const v128_t vi0xGHIJ = wasm_v128_load(i0);
+      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
+      i0 += 8;
+      const v128_t vi1xGHIJ = wasm_v128_load(i1);
+      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
+      i1 += 8;
+      const v128_t vi2xGHIJ = wasm_v128_load(i2);
+      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
+      i2 += 8;
+      const v128_t vi3xGHIJ = wasm_v128_load(i3);
+      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
+      i3 += 8;
+      const v128_t vi4xGHIJ = wasm_v128_load(i4);
+      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
+      i4 += 8;
+      const v128_t vi5xGHIJ = wasm_v128_load(i5);
+      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
+      i5 += 8;
+      const v128_t vi6xGHIJ = wasm_v128_load(i6);
+      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
+      i6 += 8;
+      const v128_t vi7xGHIJ = wasm_v128_load(i7);
+      const v128_t vi7xKLMN = wasm_v128_load(i7 + 4);
+      i7 += 8;
+      const v128_t vi8xGHIJ = wasm_v128_load(i8);
+      const v128_t vi8xKLMN = wasm_v128_load(i8 + 4);
+      i8 += 8;
+
+      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
+      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
+      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
+      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
+      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
+      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
+      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
+      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
+      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
+      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
+      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
+      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
+      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
+      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);
+      const v128_t vi7xGIKM = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 0, 2, 4, 6);
+      const v128_t vi7xHJLN = wasm_v32x4_shuffle(vi7xGHIJ, vi7xKLMN, 1, 3, 5, 7);
+      const v128_t vi8xGIKM = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 0, 2, 4, 6);
+      const v128_t vi8xHJLN = wasm_v32x4_shuffle(vi8xGHIJ, vi8xKLMN, 1, 3, 5, 7);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
+      vi0x8ACE = vi0xGIKM;
+      vi0x9BDF = vi0xHJLN;
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
+      vi1x8ACE = vi1xGIKM;
+      vi1x9BDF = vi1xHJLN;
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
+      vi2x8ACE = vi2xGIKM;
+      vi2x9BDF = vi2xHJLN;
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
+      vi3x8ACE = vi3xGIKM;
+      vi3x9BDF = vi3xHJLN;
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
+      vi4x8ACE = vi4xGIKM;
+      vi4x9BDF = vi4xHJLN;
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
+      vi5x8ACE = vi5xGIKM;
+      vi5x9BDF = vi5xHJLN;
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
+      vi6x8ACE = vi6xGIKM;
+      vi6x9BDF = vi6xHJLN;
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vi7xGIKM, 1, 2, 3, 4);
+      vi7x8ACE = vi7xGIKM;
+      vi7x9BDF = vi7xHJLN;
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vi8xGIKM, 1, 2, 3, 4);
+      vi8x8ACE = vi8xGIKM;
+      vi8x9BDF = vi8xHJLN;
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      wasm_v128_store(o2, vo2); o2 += 4;
+      wasm_v128_store(o1, vo1); o1 += 4;
+      wasm_v128_store(o0, vo0); o0 += 4;
+    }
+    // Last block has 1-8 pixels to process.
+    assert(w <= 8 * sizeof(float));
+    assert(w >= 1 * sizeof(float));
+    {
+      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
+
+      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
+      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
+      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
+      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
+      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
+      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
+      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);
+      vi7x8ACE = wasm_v128_and(vmask_even, vi7x8ACE);
+      vi8x8ACE = wasm_v128_and(vmask_even, vi8x8ACE);
+
+      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
+      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
+      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
+      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
+      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
+      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
+      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);
+      vi7x9BDF = wasm_v128_and(vmask_odd, vi7x9BDF);
+      vi8x9BDF = wasm_v128_and(vmask_odd, vi8x9BDF);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
+
+      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
+      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
+      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
+      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
+      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
+      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
+      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
+      const v128_t vi7x68AC = wasm_v32x4_shuffle(vi7x0246, vi7x8ACE, 3, 4, 5, 6);
+      const v128_t vi8x68AC = wasm_v32x4_shuffle(vi8x0246, vi8x8ACE, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
+
+      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
+      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
+      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
+      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
+      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
+      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
+      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
+      const v128_t vi7x79BD = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
+      const v128_t vi8x79BD = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
+
+      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi7xACEG = wasm_v32x4_shuffle(vi7x8ACE, vzero, 1, 2, 3, 4);
+      const v128_t vi8xACEG = wasm_v32x4_shuffle(vi8x8ACE, vzero, 1, 2, 3, 4);
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi7xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
+
+      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi8xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
+
+
+      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
+      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
+      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
+      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
+      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
+      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
+
+      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
+      if XNN_LIKELY(w_tmp >= 4) {
+        wasm_v128_store(o2, vo2); o2 += 4;
+        wasm_v128_store(o1, vo1); o1 += 4;
+        wasm_v128_store(o0, vo0); o0 += 4;
+      } else {
+        if (w_tmp & 2) {
+          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
+          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
+          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
+
+          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
+          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
+          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
+        }
+        if (w_tmp & 1) {
+          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
+          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
+          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
+        }
+      }
+    }
+
+    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
+    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
+    i2 = (const float*) ((uintptr_t) i8 - input_decrement);
+    i3 = (const float*) ((uintptr_t) i2 + input_width);
+    i4 = (const float*) ((uintptr_t) i3 + input_width);
+    i5 = (const float*) ((uintptr_t) i4 + input_width);
+    i6 = (const float*) ((uintptr_t) i5 + input_width);
+    i7 = (const float*) ((uintptr_t) i6 + input_width);
+    i8 = (const float*) ((uintptr_t) i7 + input_width);
+
+    o0 = o2;
+    o1 = (float*) ((uintptr_t) o0 + output_width);
+    o2 = (float*) ((uintptr_t) o1 + output_width);
+
+    output_height = doz(output_height, 3);
+    padded_input_height = doz(padded_input_height, 6);
+  } while (output_height != 0);
+}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-arm.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-arm.c
deleted file mode 100644
index c2ed61b..0000000
--- a/src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-arm.c
+++ /dev/null
@@ -1,385 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-1x4-acc2.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top >= 1);
-  assert(padding_top <= 2);
-
-  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
-  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
-  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
-  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
-  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
-  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
-  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
-  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
-  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
-  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
-  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
-  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
-  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
-  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
-  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const uint32_t padding_top_less_1 = padding_top - 1;
-  const size_t input_decrement = round_down_po2(input_width - 1 * sizeof(float), 4 * sizeof(float)) + 4 * sizeof(float);
-
-  const float* i0 = zero;
-  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
-    i1 = zero;
-  }
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
-  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
-  do {
-    if XNN_UNPREDICTABLE(padded_input_height <= 6) {
-      i4 = zero;
-    }
-    if XNN_UNPREDICTABLE(padded_input_height < 6) {
-      i3 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-
-    size_t w = input_width;
-    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
-      v128_t vo468Ap0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      i0 += 8;
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      i1 += 8;
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-      i2 += 8;
-      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
-      i3 += 8;
-      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
-      i4 += 8;
-
-      const v128_t vi0x468A = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi0x579B = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi1x468A = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi1x579B = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi2x468A = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi2x579B = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi3x468A = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi3x579B = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi4x468A = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi4x579B = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 3, 4 + 1, 4 + 3);
-
-      // middle tap
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x468A, vk02));
-      v128_t vo468Ap1 = wasm_f32x4_mul(vi1x468A, vk12);
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x468A, vk22));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x468A, vk32));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x468A, vk42));
-
-      // one left
-      const v128_t vi0x3579 = wasm_v32x4_shuffle(vi0x0123, vi0x579B, 3, 4, 5, 6);
-      const v128_t vi1x3579 = wasm_v32x4_shuffle(vi1x0123, vi1x579B, 3, 4, 5, 6);
-      const v128_t vi2x3579 = wasm_v32x4_shuffle(vi2x0123, vi2x579B, 3, 4, 5, 6);
-      const v128_t vi3x3579 = wasm_v32x4_shuffle(vi3x0123, vi3x579B, 3, 4, 5, 6);
-      const v128_t vi4x3579 = wasm_v32x4_shuffle(vi4x0123, vi4x579B, 3, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x3579, vk01));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x3579, vk11));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x3579, vk21));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x3579, vk31));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x3579, vk41));
-
-      // two left
-      const v128_t vi0x2468 = wasm_v32x4_shuffle(vi0x0123, vi0x468A, 2, 4, 5, 6);
-      const v128_t vi1x2468 = wasm_v32x4_shuffle(vi1x0123, vi1x468A, 2, 4, 5, 6);
-      const v128_t vi2x2468 = wasm_v32x4_shuffle(vi2x0123, vi2x468A, 2, 4, 5, 6);
-      const v128_t vi3x2468 = wasm_v32x4_shuffle(vi3x0123, vi3x468A, 2, 4, 5, 6);
-      const v128_t vi4x2468 = wasm_v32x4_shuffle(vi4x0123, vi4x468A, 2, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x2468, vk00));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x2468, vk10));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x2468, vk20));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x2468, vk30));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x2468, vk40));
-
-      vi0x0123 = vi0x89AB;
-      vi1x0123 = vi1x89AB;
-      vi2x0123 = vi2x89AB;
-      vi3x0123 = vi3x89AB;
-      vi4x0123 = vi4x89AB;
-
-      // one right
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x579B, vk03));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x579B, vk13));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x579B, vk23));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x579B, vk33));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x579B, vk43));
-
-      // two right
-      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x468A, vi0xCDEF, 1, 2, 3, 4);
-      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x468A, vi1xCDEF, 1, 2, 3, 4);
-      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x468A, vi2xCDEF, 1, 2, 3, 4);
-      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x468A, vi3xCDEF, 1, 2, 3, 4);
-      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x468A, vi4xCDEF, 1, 2, 3, 4);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x68AC, vk04));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x68AC, vk14));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x68AC, vk24));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x68AC, vk34));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x68AC, vk44));
-
-      vi0x4567 = vi0xCDEF;
-      vi1x4567 = vi1xCDEF;
-      vi2x4567 = vi2xCDEF;
-      vi3x4567 = vi3xCDEF;
-      vi4x4567 = vi4xCDEF;
-
-      v128_t vo0 = wasm_f32x4_add(vo468Ap0, vo468Ap1);
-
-      vo0 = wasm_f32x4_max(vo0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
-      if XNN_LIKELY(w_tmp >= 4) {
-        wasm_v128_store(output, vo0);
-        output += 4;
-      } else {
-        if (w_tmp & 2) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo0, 0);
-          output += 2;
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w_tmp & 1) {
-          *output = wasm_f32x4_extract_lane(vo0, 0);
-          output += 1;
-        }
-      }
-    }
-
-    {
-      v128_t vo468Ap0 = vbias;
-
-      v128_t vi0x89AB = vzero;
-      v128_t vi1x89AB = vzero;
-      v128_t vi2x89AB = vzero;
-      v128_t vi3x89AB = vzero;
-      v128_t vi4x89AB = vzero;
-      if XNN_LIKELY(w > 4 * sizeof(float)) {
-        vi0x89AB = wasm_v128_load(i0);
-        i0 += 4;
-        vi1x89AB = wasm_v128_load(i1);
-        i1 += 4;
-        vi2x89AB = wasm_v128_load(i2);
-        i2 += 4;
-        vi3x89AB = wasm_v128_load(i3);
-        i3 += 4;
-        vi4x89AB = wasm_v128_load(i4);
-        i4 += 4;
-      }
-
-      v128_t vi0xCDEF = vzero;
-      v128_t vi1xCDEF = vzero;
-      v128_t vi2xCDEF = vzero;
-      v128_t vi3xCDEF = vzero;
-      v128_t vi4xCDEF = vzero;
-      if XNN_LIKELY(w > 8 * sizeof(float)) {
-        vi0xCDEF = wasm_v128_load(i0);
-        i0 += 4;
-        vi1xCDEF = wasm_v128_load(i1);
-        i1 += 4;
-        vi2xCDEF = wasm_v128_load(i2);
-        i2 += 4;
-        vi3xCDEF = wasm_v128_load(i3);
-        i3 += 4;
-        vi4xCDEF = wasm_v128_load(i4);
-        i4 += 4;
-      }
-
-      v128_t vi0x468A = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi0x579B = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi1x468A = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi1x579B = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi2x468A = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi2x579B = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi3x468A = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi3x579B = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi4x468A = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi4x579B = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 3, 4 + 1, 4 + 3);
-
-      vi0x468A = wasm_v128_and(vmask_even, vi0x468A);
-      vi1x468A = wasm_v128_and(vmask_even, vi1x468A);
-      vi2x468A = wasm_v128_and(vmask_even, vi2x468A);
-      vi3x468A = wasm_v128_and(vmask_even, vi3x468A);
-      vi4x468A = wasm_v128_and(vmask_even, vi4x468A);
-
-      vi0x579B = wasm_v128_and(vmask_odd, vi0x579B);
-      vi1x579B = wasm_v128_and(vmask_odd, vi1x579B);
-      vi2x579B = wasm_v128_and(vmask_odd, vi2x579B);
-      vi3x579B = wasm_v128_and(vmask_odd, vi3x579B);
-      vi4x579B = wasm_v128_and(vmask_odd, vi4x579B);
-
-      // middle tap
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x468A, vk02));
-      v128_t vo468Ap1 = wasm_f32x4_mul(vi1x468A, vk12);
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x468A, vk22));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x468A, vk32));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x468A, vk42));
-
-      // one left
-      const v128_t vi0x3579 = wasm_v32x4_shuffle(vi0x0123, vi0x579B, 3, 4, 5, 6);
-      const v128_t vi1x3579 = wasm_v32x4_shuffle(vi1x0123, vi1x579B, 3, 4, 5, 6);
-      const v128_t vi2x3579 = wasm_v32x4_shuffle(vi2x0123, vi2x579B, 3, 4, 5, 6);
-      const v128_t vi3x3579 = wasm_v32x4_shuffle(vi3x0123, vi3x579B, 3, 4, 5, 6);
-      const v128_t vi4x3579 = wasm_v32x4_shuffle(vi4x0123, vi4x579B, 3, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x3579, vk01));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x3579, vk11));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x3579, vk21));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x3579, vk31));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x3579, vk41));
-
-      // two left
-      const v128_t vi0x2468 = wasm_v32x4_shuffle(vi0x0123, vi0x468A, 2, 4, 5, 6);
-      const v128_t vi1x2468 = wasm_v32x4_shuffle(vi1x0123, vi1x468A, 2, 4, 5, 6);
-      const v128_t vi2x2468 = wasm_v32x4_shuffle(vi2x0123, vi2x468A, 2, 4, 5, 6);
-      const v128_t vi3x2468 = wasm_v32x4_shuffle(vi3x0123, vi3x468A, 2, 4, 5, 6);
-      const v128_t vi4x2468 = wasm_v32x4_shuffle(vi4x0123, vi4x468A, 2, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x2468, vk00));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x2468, vk10));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x2468, vk20));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x2468, vk30));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x2468, vk40));
-
-      vi0x0123 = vi0x89AB;
-      vi1x0123 = vi1x89AB;
-      vi2x0123 = vi2x89AB;
-      vi3x0123 = vi3x89AB;
-      vi4x0123 = vi4x89AB;
-
-      // one right
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x579B, vk03));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x579B, vk13));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x579B, vk23));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x579B, vk33));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x579B, vk43));
-
-      // two right
-      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x468A, vi0xCDEF, 1, 2, 3, 4);
-      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x468A, vi1xCDEF, 1, 2, 3, 4);
-      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x468A, vi2xCDEF, 1, 2, 3, 4);
-      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x468A, vi3xCDEF, 1, 2, 3, 4);
-      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x468A, vi4xCDEF, 1, 2, 3, 4);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x68AC, vk04));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x68AC, vk14));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x68AC, vk24));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x68AC, vk34));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x68AC, vk44));
-
-      vi0x4567 = vi0xCDEF;
-      vi1x4567 = vi1xCDEF;
-      vi2x4567 = vi2xCDEF;
-      vi3x4567 = vi3xCDEF;
-      vi4x4567 = vi4xCDEF;
-
-      v128_t vo0 = wasm_f32x4_add(vo468Ap0, vo468Ap1);
-
-      vo0 = wasm_f32x4_max(vo0, vmin);
-      vo0 = wasm_f32x4_min(vo0, vmax);
-
-      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
-      if XNN_LIKELY(w_tmp >= 4) {
-        wasm_v128_store(output, vo0);
-        output += 4;
-      } else {
-        if (w_tmp & 2) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo0, 0);
-          output += 2;
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w_tmp & 1) {
-          *output = wasm_f32x4_extract_lane(vo0, 0);
-          output += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-    output_height -= 1;
-    padded_input_height -= 2;
-  } while (output_height != 0);
-}
diff --git a/src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-x86.c b/src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-x86.c
deleted file mode 100644
index cee807d..0000000
--- a/src/f32-dwconv2d-chw/gen/5x5s2p2-wasmsimd-1x4-acc2-x86.c
+++ /dev/null
@@ -1,385 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-1x4-acc2.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-
-#include <xnnpack/dwconv.h>
-#include <xnnpack/math.h>
-
-
-
-void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2(
-    size_t input_height,
-    size_t input_width,
-    const float* input,
-    const float* weights,
-    const float* zero,
-    float* output,
-    uint32_t padding_top,
-    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(input_height != 0);
-  assert(input_width != 0);
-  assert(input_width % sizeof(float) == 0);
-  assert(padding_top >= 1);
-  assert(padding_top <= 2);
-
-  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
-  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-
-  const v128_t vbias = wasm_v32x4_load_splat(weights);
-  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
-  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
-  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
-  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
-  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
-  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
-  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
-  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
-  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
-  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
-  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
-  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
-  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
-  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
-  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
-  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
-  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
-  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
-  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
-  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
-  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
-  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
-  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
-  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
-  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);
-
-  const v128_t vzero = wasm_f32x4_splat(0.0f);
-
-  const uint32_t padding_top_less_1 = padding_top - 1;
-  const size_t input_decrement = round_down_po2(input_width - 1 * sizeof(float), 4 * sizeof(float)) + 4 * sizeof(float);
-
-  const float* i0 = zero;
-  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
-  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
-  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
-    i1 = zero;
-  }
-  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
-  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
-  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
-  do {
-    if XNN_UNPREDICTABLE(padded_input_height <= 6) {
-      i4 = zero;
-    }
-    if XNN_UNPREDICTABLE(padded_input_height < 6) {
-      i3 = zero;
-    }
-
-    v128_t vi0x0123 = vzero;
-    v128_t vi1x0123 = vzero;
-    v128_t vi2x0123 = vzero;
-    v128_t vi3x0123 = vzero;
-    v128_t vi4x0123 = vzero;
-    v128_t vi0x4567 = wasm_v128_load(i0);
-    i0 += 4;
-    v128_t vi1x4567 = wasm_v128_load(i1);
-    i1 += 4;
-    v128_t vi2x4567 = wasm_v128_load(i2);
-    i2 += 4;
-    v128_t vi3x4567 = wasm_v128_load(i3);
-    i3 += 4;
-    v128_t vi4x4567 = wasm_v128_load(i4);
-    i4 += 4;
-
-    size_t w = input_width;
-    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
-      v128_t vo468Ap0 = vbias;
-
-      const v128_t vi0x89AB = wasm_v128_load(i0);
-      const v128_t vi1x89AB = wasm_v128_load(i1);
-      const v128_t vi2x89AB = wasm_v128_load(i2);
-      const v128_t vi3x89AB = wasm_v128_load(i3);
-      const v128_t vi4x89AB = wasm_v128_load(i4);
-
-      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
-      i0 += 8;
-      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
-      i1 += 8;
-      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
-      i2 += 8;
-      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
-      i3 += 8;
-      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
-      i4 += 8;
-
-      const v128_t vi0x468A = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi0x579B = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi1x468A = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi1x579B = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi2x468A = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi2x579B = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi3x468A = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi3x579B = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 3, 4 + 1, 4 + 3);
-      const v128_t vi4x468A = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 0, 2, 4 + 0, 4 + 2);
-      const v128_t vi4x579B = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 3, 4 + 1, 4 + 3);
-
-      // middle tap
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x468A, vk02));
-      v128_t vo468Ap1 = wasm_f32x4_mul(vi1x468A, vk12);
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x468A, vk22));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x468A, vk32));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x468A, vk42));
-
-      // one left
-      const v128_t vi0x3579 = wasm_v32x4_shuffle(vi0x0123, vi0x579B, 3, 4, 5, 6);
-      const v128_t vi1x3579 = wasm_v32x4_shuffle(vi1x0123, vi1x579B, 3, 4, 5, 6);
-      const v128_t vi2x3579 = wasm_v32x4_shuffle(vi2x0123, vi2x579B, 3, 4, 5, 6);
-      const v128_t vi3x3579 = wasm_v32x4_shuffle(vi3x0123, vi3x579B, 3, 4, 5, 6);
-      const v128_t vi4x3579 = wasm_v32x4_shuffle(vi4x0123, vi4x579B, 3, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x3579, vk01));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x3579, vk11));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x3579, vk21));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x3579, vk31));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x3579, vk41));
-
-      // two left
-      const v128_t vi0x2468 = wasm_v32x4_shuffle(vi0x0123, vi0x468A, 2, 4, 5, 6);
-      const v128_t vi1x2468 = wasm_v32x4_shuffle(vi1x0123, vi1x468A, 2, 4, 5, 6);
-      const v128_t vi2x2468 = wasm_v32x4_shuffle(vi2x0123, vi2x468A, 2, 4, 5, 6);
-      const v128_t vi3x2468 = wasm_v32x4_shuffle(vi3x0123, vi3x468A, 2, 4, 5, 6);
-      const v128_t vi4x2468 = wasm_v32x4_shuffle(vi4x0123, vi4x468A, 2, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x2468, vk00));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x2468, vk10));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x2468, vk20));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x2468, vk30));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x2468, vk40));
-
-      vi0x0123 = vi0x89AB;
-      vi1x0123 = vi1x89AB;
-      vi2x0123 = vi2x89AB;
-      vi3x0123 = vi3x89AB;
-      vi4x0123 = vi4x89AB;
-
-      // one right
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x579B, vk03));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x579B, vk13));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x579B, vk23));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x579B, vk33));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x579B, vk43));
-
-      // two right
-      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x468A, vi0xCDEF, 1, 2, 3, 4);
-      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x468A, vi1xCDEF, 1, 2, 3, 4);
-      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x468A, vi2xCDEF, 1, 2, 3, 4);
-      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x468A, vi3xCDEF, 1, 2, 3, 4);
-      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x468A, vi4xCDEF, 1, 2, 3, 4);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x68AC, vk04));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x68AC, vk14));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x68AC, vk24));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x68AC, vk34));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x68AC, vk44));
-
-      vi0x4567 = vi0xCDEF;
-      vi1x4567 = vi1xCDEF;
-      vi2x4567 = vi2xCDEF;
-      vi3x4567 = vi3xCDEF;
-      vi4x4567 = vi4xCDEF;
-
-      v128_t vo0 = wasm_f32x4_add(vo468Ap0, vo468Ap1);
-
-      vo0 = wasm_v128_bitselect(vmin, vo0, wasm_f32x4_lt(vo0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
-      if XNN_LIKELY(w_tmp >= 4) {
-        wasm_v128_store(output, vo0);
-        output += 4;
-      } else {
-        if (w_tmp & 2) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo0, 0);
-          output += 2;
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w_tmp & 1) {
-          *output = wasm_f32x4_extract_lane(vo0, 0);
-          output += 1;
-        }
-      }
-    }
-
-    {
-      v128_t vo468Ap0 = vbias;
-
-      v128_t vi0x89AB = vzero;
-      v128_t vi1x89AB = vzero;
-      v128_t vi2x89AB = vzero;
-      v128_t vi3x89AB = vzero;
-      v128_t vi4x89AB = vzero;
-      if XNN_LIKELY(w > 4 * sizeof(float)) {
-        vi0x89AB = wasm_v128_load(i0);
-        i0 += 4;
-        vi1x89AB = wasm_v128_load(i1);
-        i1 += 4;
-        vi2x89AB = wasm_v128_load(i2);
-        i2 += 4;
-        vi3x89AB = wasm_v128_load(i3);
-        i3 += 4;
-        vi4x89AB = wasm_v128_load(i4);
-        i4 += 4;
-      }
-
-      v128_t vi0xCDEF = vzero;
-      v128_t vi1xCDEF = vzero;
-      v128_t vi2xCDEF = vzero;
-      v128_t vi3xCDEF = vzero;
-      v128_t vi4xCDEF = vzero;
-      if XNN_LIKELY(w > 8 * sizeof(float)) {
-        vi0xCDEF = wasm_v128_load(i0);
-        i0 += 4;
-        vi1xCDEF = wasm_v128_load(i1);
-        i1 += 4;
-        vi2xCDEF = wasm_v128_load(i2);
-        i2 += 4;
-        vi3xCDEF = wasm_v128_load(i3);
-        i3 += 4;
-        vi4xCDEF = wasm_v128_load(i4);
-        i4 += 4;
-      }
-
-      v128_t vi0x468A = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi0x579B = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi1x468A = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi1x579B = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi2x468A = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi2x579B = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi3x468A = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi3x579B = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 3, 4 + 1, 4 + 3);
-      v128_t vi4x468A = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 0, 2, 4 + 0, 4 + 2);
-      v128_t vi4x579B = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 3, 4 + 1, 4 + 3);
-
-      vi0x468A = wasm_v128_and(vmask_even, vi0x468A);
-      vi1x468A = wasm_v128_and(vmask_even, vi1x468A);
-      vi2x468A = wasm_v128_and(vmask_even, vi2x468A);
-      vi3x468A = wasm_v128_and(vmask_even, vi3x468A);
-      vi4x468A = wasm_v128_and(vmask_even, vi4x468A);
-
-      vi0x579B = wasm_v128_and(vmask_odd, vi0x579B);
-      vi1x579B = wasm_v128_and(vmask_odd, vi1x579B);
-      vi2x579B = wasm_v128_and(vmask_odd, vi2x579B);
-      vi3x579B = wasm_v128_and(vmask_odd, vi3x579B);
-      vi4x579B = wasm_v128_and(vmask_odd, vi4x579B);
-
-      // middle tap
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x468A, vk02));
-      v128_t vo468Ap1 = wasm_f32x4_mul(vi1x468A, vk12);
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x468A, vk22));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x468A, vk32));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x468A, vk42));
-
-      // one left
-      const v128_t vi0x3579 = wasm_v32x4_shuffle(vi0x0123, vi0x579B, 3, 4, 5, 6);
-      const v128_t vi1x3579 = wasm_v32x4_shuffle(vi1x0123, vi1x579B, 3, 4, 5, 6);
-      const v128_t vi2x3579 = wasm_v32x4_shuffle(vi2x0123, vi2x579B, 3, 4, 5, 6);
-      const v128_t vi3x3579 = wasm_v32x4_shuffle(vi3x0123, vi3x579B, 3, 4, 5, 6);
-      const v128_t vi4x3579 = wasm_v32x4_shuffle(vi4x0123, vi4x579B, 3, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x3579, vk01));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x3579, vk11));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x3579, vk21));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x3579, vk31));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x3579, vk41));
-
-      // two left
-      const v128_t vi0x2468 = wasm_v32x4_shuffle(vi0x0123, vi0x468A, 2, 4, 5, 6);
-      const v128_t vi1x2468 = wasm_v32x4_shuffle(vi1x0123, vi1x468A, 2, 4, 5, 6);
-      const v128_t vi2x2468 = wasm_v32x4_shuffle(vi2x0123, vi2x468A, 2, 4, 5, 6);
-      const v128_t vi3x2468 = wasm_v32x4_shuffle(vi3x0123, vi3x468A, 2, 4, 5, 6);
-      const v128_t vi4x2468 = wasm_v32x4_shuffle(vi4x0123, vi4x468A, 2, 4, 5, 6);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x2468, vk00));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x2468, vk10));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x2468, vk20));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x2468, vk30));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x2468, vk40));
-
-      vi0x0123 = vi0x89AB;
-      vi1x0123 = vi1x89AB;
-      vi2x0123 = vi2x89AB;
-      vi3x0123 = vi3x89AB;
-      vi4x0123 = vi4x89AB;
-
-      // one right
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x579B, vk03));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x579B, vk13));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x579B, vk23));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x579B, vk33));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x579B, vk43));
-
-      // two right
-      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x468A, vi0xCDEF, 1, 2, 3, 4);
-      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x468A, vi1xCDEF, 1, 2, 3, 4);
-      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x468A, vi2xCDEF, 1, 2, 3, 4);
-      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x468A, vi3xCDEF, 1, 2, 3, 4);
-      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x468A, vi4xCDEF, 1, 2, 3, 4);
-
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi0x68AC, vk04));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi1x68AC, vk14));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi2x68AC, vk24));
-      vo468Ap1 = wasm_f32x4_add(vo468Ap1, wasm_f32x4_mul(vi3x68AC, vk34));
-      vo468Ap0 = wasm_f32x4_add(vo468Ap0, wasm_f32x4_mul(vi4x68AC, vk44));
-
-      vi0x4567 = vi0xCDEF;
-      vi1x4567 = vi1xCDEF;
-      vi2x4567 = vi2xCDEF;
-      vi3x4567 = vi3xCDEF;
-      vi4x4567 = vi4xCDEF;
-
-      v128_t vo0 = wasm_f32x4_add(vo468Ap0, vo468Ap1);
-
-      vo0 = wasm_v128_bitselect(vmin, vo0, wasm_f32x4_lt(vo0, vmin));
-      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
-
-      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
-      if XNN_LIKELY(w_tmp >= 4) {
-        wasm_v128_store(output, vo0);
-        output += 4;
-      } else {
-        if (w_tmp & 2) {
-          *((double*) output) = wasm_f64x2_extract_lane(vo0, 0);
-          output += 2;
-          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
-        }
-        if (w_tmp & 1) {
-          *output = wasm_f32x4_extract_lane(vo0, 0);
-          output += 1;
-        }
-      }
-    }
-
-    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
-    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
-    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
-    i3 = (const float*) ((uintptr_t) i2 + input_width);
-    i4 = (const float*) ((uintptr_t) i3 + input_width);
-
-    output_height -= 1;
-    padded_input_height -= 2;
-  } while (output_height != 0);
-}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-sse2-dup.c b/src/f32-gemm/gen-inc/1x8inc-minmax-sse2-dup.c
new file mode 100644
index 0000000..8dd0987
--- /dev/null
+++ b/src/f32-gemm/gen-inc/1x8inc-minmax-sse2-dup.c
@@ -0,0 +1,143 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    acc += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+
+
+      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..06cb1a1
--- /dev/null
+++ b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,101 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    acc += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..c93a943
--- /dev/null
+++ b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,139 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    acc += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index 71611ed..0000000
--- a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    acc += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 2ea5c5a..0000000
--- a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    acc += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index e0b5ee2..0000000
--- a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    acc += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 775b2a9..0000000
--- a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    acc += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..670c303
--- /dev/null
+++ b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,101 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    acc += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..8080f18
--- /dev/null
+++ b/src/f32-gemm/gen-inc/1x8inc-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,139 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    acc += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
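
Aside (illustrative sketch, not part of the generated sources above): the wasmsimd "x86" variants clamp their accumulators with wasm_v128_bitselect guarded by wasm_f32x4_lt / wasm_f32x4_le, whereas the "arm" variants apply wasm_f32x4_max / wasm_f32x4_min directly. For finite min/max parameters the two formulations agree; the scalar sketch below (the helper name clamp_bitselect and the driver main are made up for the example) spells out the select logic.

#include <stdio.h>

/* Scalar equivalent of the bitselect-based clamp in the x86 kernels above.
 * Assumes finite min <= max, as supplied via the scalar minmax params. */
static float clamp_bitselect(float acc, float min, float max) {
  /* wasm_v128_bitselect(vmin, vacc, vacc < vmin): pick min where acc falls below it. */
  acc = (acc < min) ? min : acc;
  /* wasm_v128_bitselect(vacc, vmax, vacc <= vmax): keep acc only while it is <= max. */
  acc = (acc <= max) ? acc : max;
  return acc;
}

int main(void) {
  printf("%g %g %g\n",
         clamp_bitselect(-3.0f, 0.0f, 6.0f),   /* 0   */
         clamp_bitselect( 2.5f, 0.0f, 6.0f),   /* 2.5 */
         clamp_bitselect( 9.0f, 0.0f, 6.0f));  /* 6   */
  return 0;
}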
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-sse-dup.c b/src/f32-gemm/gen-inc/3x8inc-minmax-sse-dup.c
new file mode 100644
index 0000000..ca65b4b
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x8inc-minmax-sse-dup.c
@@ -0,0 +1,225 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    acc += 24;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+
+
+      const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-sse-load1.c b/src/f32-gemm/gen-inc/3x8inc-minmax-sse-load1.c
new file mode 100644
index 0000000..ef61246
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x8inc-minmax-sse-load1.c
@@ -0,0 +1,155 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-load1.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    acc += 24;
+
+    size_t k = kc;
+    do {
+      const __m128 va0 = _mm_load1_ps(a0);
+      a0 += 1;
+      const __m128 va1 = _mm_load1_ps(a1);
+      a1 += 1;
+      const __m128 va2 = _mm_load1_ps(a2);
+      a2 += 1;
+
+      const __m128 vb0123 = _mm_load_ps(w);
+      const __m128 vb4567 = _mm_load_ps(w + 4);
+      w += 8;
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
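
Aside (illustrative sketch, not generated code): when fewer than 8 output columns remain, the kernels above peel the store into a 4-, 2- and 1-element step, advancing the output pointer and shifting the surviving accumulator lanes down after each partial store. The scalar model below makes assumptions for brevity: store_tail and the flat acc[8] array stand in for the per-row vector accumulators.

#include <stddef.h>
#include <stdio.h>

/* Scalar model of the nc & 4 / nc & 2 / nc & 1 tail handling: write the widest
 * remaining chunk, then continue from the next untouched accumulator lane. */
static void store_tail(float *c, const float acc[8], size_t nc) {
  size_t i = 0;
  if (nc & 4) {
    for (size_t j = 0; j < 4; j++) c[i + j] = acc[i + j];
    i += 4;  /* mirrors vacc0x0123 = vacc0x4567; c += 4; */
  }
  if (nc & 2) {
    for (size_t j = 0; j < 2; j++) c[i + j] = acc[i + j];
    i += 2;  /* mirrors _mm_movehl_ps / the 2,3,2,3 lane shuffle; c += 2; */
  }
  if (nc & 1) {
    c[i] = acc[i];  /* final single-element store */
  }
}

int main(void) {
  const float acc[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  float out[8] = {0};
  store_tail(out, acc, 7);  /* writes out[0..6], leaves out[7] untouched */
  for (size_t i = 0; i < 8; i++) printf("%g ", out[i]);
  printf("\n");
  return 0;
}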
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-sse2-dup.c b/src/f32-gemm/gen-inc/3x8inc-minmax-sse2-dup.c
new file mode 100644
index 0000000..cadedaa
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x8inc-minmax-sse2-dup.c
@@ -0,0 +1,225 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    acc += 24;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+
+
+      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..e582b29
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,155 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    acc += 24;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
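
Aside (simplified sketch, not part of the patch): stripped of the SIMD details, every gemminc kernel above seeds its accumulators from the acc buffer, adds a rank-1 update a[m][k] * w[k][n] per k step, clamps to [min, max], and stores the mr x nc tile. The reference below makes assumptions for brevity: counts and strides are in elements rather than bytes, and w is taken as kc rows of nr packed floats.

#include <stddef.h>

/* Scalar reference for an mr x nr "gemminc" tile with min/max clamping. */
void gemminc_ref(size_t mr, size_t nr, size_t kc,
                 const float *a, size_t a_stride,   /* elements */
                 const float *w,                    /* kc rows of nr floats */
                 float *c, size_t c_stride,         /* elements */
                 const float *acc, float min, float max) {
  for (size_t m = 0; m < mr; m++) {
    for (size_t n = 0; n < nr; n++) {
      float v = acc[m * nr + n];                    /* seed from the acc buffer */
      for (size_t k = 0; k < kc; k++) {
        v += a[m * a_stride + k] * w[k * nr + n];
      }
      v = v < min ? min : v;                        /* clamp as in the kernels above */
      v = v > max ? max : v;
      c[m * c_stride + n] = v;
    }
  }
}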
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..ed7b1f3
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,221 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    acc += 24;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index d27d997..0000000
--- a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    acc += 24;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index e723a4e..0000000
--- a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    acc += 24;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 4731f5a..0000000
--- a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,221 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    acc += 24;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 7ec96d0..0000000
--- a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,221 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    acc += 24;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..851551d
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,155 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    acc += 24;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..f6ad9cd
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x8inc-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,221 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    acc += 24;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/3x8s4inc-minmax-sse.c b/src/f32-gemm/gen-inc/3x8s4inc-minmax-sse.c
new file mode 100644
index 0000000..946477f
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x8s4inc-minmax-sse.c
@@ -0,0 +1,222 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_3x8s4__sse(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    acc += 24;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-sse2-dup.c b/src/f32-gemm/gen-inc/4x8inc-minmax-sse2-dup.c
new file mode 100644
index 0000000..b3763f2
--- /dev/null
+++ b/src/f32-gemm/gen-inc/4x8inc-minmax-sse2-dup.c
@@ -0,0 +1,266 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    __m128 vacc3x0123 = _mm_load_ps(acc + 24);
+    __m128 vacc3x4567 = _mm_load_ps(acc + 28);
+    acc += 32;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+      const __m128 va3 = _mm_loadu_ps(a3);
+      a3 += 4;
+
+
+      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
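For reference, the 4x8 minmax gemminc micro-kernels added in this patch all compute the same tile update; the following is a minimal scalar sketch (illustrative only, not part of the patch, with strides expressed in floats and the nc/cn_stride column tiling of the real kernels omitted):

#include <stddef.h>

// c[m][n] = clamp(acc[m*8 + n] + sum_k a[m][k] * w[k*8 + n], min, max)
// for m < mr (mr <= 4) and n < min(nc, 8), matching the vector kernels above.
static void gemminc_4x8_reference(
    size_t mr, size_t nc, size_t kc_floats,
    const float* a, size_t a_stride_floats,
    const float* w,                 // packed weights: 8 column values per k step
    float* c, size_t cm_stride_floats,
    const float* acc,               // mr*8 initial accumulators (gemminc variant)
    float min, float max)
{
  for (size_t m = 0; m < mr; m++) {
    for (size_t n = 0; n < nc && n < 8; n++) {
      float s = acc[m * 8 + n];
      for (size_t k = 0; k < kc_floats; k++) {
        s += a[m * a_stride_floats + k] * w[k * 8 + n];
      }
      // Same clamp as the vmin/vmax stage in the SIMD kernels.
      s = s < min ? min : s;
      s = s > max ? max : s;
      c[m * cm_stride_floats + n] = s;
    }
  }
}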
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..0d3942e
--- /dev/null
+++ b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,182 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    acc += 32;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
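The loadsplat kernel above broadcasts a single A element per k iteration, while the splat variant added next loads four A elements at once, duplicates each lane with a shuffle, and so retires four k steps per pass of its main loop (the scalar remainder falls back to the loadsplat pattern). A small illustrative sketch of the two broadcast styles, not part of the patch:

#include <wasm_simd128.h>

// loadsplat style: one scalar load + splat per k step.
static inline v128_t broadcast_loadsplat(const float* a) {
  return wasm_v32x4_load_splat(a);
}

// splat style: load 4 A elements once, then duplicate each lane with a shuffle.
// The lane index must be a compile-time constant; lane 1 is shown here.
static inline v128_t broadcast_splat_lane1(const float* a) {
  const v128_t va = wasm_v128_load(a);
  return wasm_v32x4_shuffle(va, va, 1, 1, 1, 1);
}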
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..988c2e9
--- /dev/null
+++ b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,262 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    acc += 32;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index e624d46..0000000
--- a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    acc += 32;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 52f9961..0000000
--- a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    acc += 32;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
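The -arm kernels in this patch clamp with wasm_f32x4_max/wasm_f32x4_min, while the -x86 kernels (such as the file removed above and its renamed replacement later in the patch) clamp with a compare plus wasm_v128_bitselect. For non-NaN accumulators the two forms produce identical results; the split presumably exists because the two sequences lower differently on ARM and x86 WebAssembly engines. Illustrative sketch only, not part of the patch:

#include <wasm_simd128.h>

// Clamp style of the -arm kernels.
static inline v128_t clamp_arm(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_f32x4_max(v, vmin);
  return wasm_f32x4_min(v, vmax);
}

// Clamp style of the -x86 kernels: bitselect picks its first operand where
// the mask is set, so these lines read "v < min ? min : v" and "v <= max ? v : max".
static inline v128_t clamp_x86(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_v128_bitselect(vmin, v, wasm_f32x4_lt(v, vmin));
  return wasm_v128_bitselect(v, vmax, wasm_f32x4_le(v, vmax));
}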
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 92200a8..0000000
--- a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,262 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    acc += 32;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index e589f13..0000000
--- a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,262 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    acc += 32;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..82b0967
--- /dev/null
+++ b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,182 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    acc += 32;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..d4db122
--- /dev/null
+++ b/src/f32-gemm/gen-inc/4x8inc-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,262 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    acc += 32;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-sse-dup.c b/src/f32-gemm/gen-inc/5x8inc-minmax-sse-dup.c
new file mode 100644
index 0000000..f587e5b
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x8inc-minmax-sse-dup.c
@@ -0,0 +1,307 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    __m128 vacc3x0123 = _mm_load_ps(acc + 24);
+    __m128 vacc3x4567 = _mm_load_ps(acc + 28);
+    __m128 vacc4x0123 = _mm_load_ps(acc + 32);
+    __m128 vacc4x4567 = _mm_load_ps(acc + 36);
+    acc += 40;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+      const __m128 va3 = _mm_loadu_ps(a3);
+      a3 += 4;
+      const __m128 va4 = _mm_loadu_ps(a4);
+      a4 += 4;
+
+
+      const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va4c0000 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 0, 0, 0));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va4c1111 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(1, 1, 1, 1));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va4c2222 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(2, 2, 2, 2));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+        const __m128 va4 = _mm_load1_ps(a4);
+        a4 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-sse-load1.c b/src/f32-gemm/gen-inc/5x8inc-minmax-sse-load1.c
new file mode 100644
index 0000000..173e7e4
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x8inc-minmax-sse-load1.c
@@ -0,0 +1,209 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-load1.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    __m128 vacc3x0123 = _mm_load_ps(acc + 24);
+    __m128 vacc3x4567 = _mm_load_ps(acc + 28);
+    __m128 vacc4x0123 = _mm_load_ps(acc + 32);
+    __m128 vacc4x4567 = _mm_load_ps(acc + 36);
+    acc += 40;
+
+    size_t k = kc;
+    do {
+      const __m128 va0 = _mm_load1_ps(a0);
+      a0 += 1;
+      const __m128 va1 = _mm_load1_ps(a1);
+      a1 += 1;
+      const __m128 va2 = _mm_load1_ps(a2);
+      a2 += 1;
+      const __m128 va3 = _mm_load1_ps(a3);
+      a3 += 1;
+      const __m128 va4 = _mm_load1_ps(a4);
+      a4 += 1;
+
+      const __m128 vb0123 = _mm_load_ps(w);
+      const __m128 vb4567 = _mm_load_ps(w + 4);
+      w += 8;
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-sse2-dup.c b/src/f32-gemm/gen-inc/5x8inc-minmax-sse2-dup.c
new file mode 100644
index 0000000..7f6fb8e
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x8inc-minmax-sse2-dup.c
@@ -0,0 +1,307 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    __m128 vacc3x0123 = _mm_load_ps(acc + 24);
+    __m128 vacc3x4567 = _mm_load_ps(acc + 28);
+    __m128 vacc4x0123 = _mm_load_ps(acc + 32);
+    __m128 vacc4x4567 = _mm_load_ps(acc + 36);
+    acc += 40;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+      const __m128 va3 = _mm_loadu_ps(a3);
+      a3 += 4;
+      const __m128 va4 = _mm_loadu_ps(a4);
+      a4 += 4;
+
+
+      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+        const __m128 va4 = _mm_load1_ps(a4);
+        a4 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..888d4a4
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,209 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
+    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
+    acc += 40;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+      const v128_t va4 = wasm_v32x4_load_splat(a4);
+      a4 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..bb85423
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,303 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
+    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
+    acc += 40;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+      const v128_t va4 = wasm_v128_load(a4);
+      a4 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index ef4dcda..0000000
--- a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,209 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
-    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
-    acc += 40;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-      const v128_t va4 = wasm_v32x4_load_splat(a4);
-      a4 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index a19939d..0000000
--- a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,209 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
-    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
-    acc += 40;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-      const v128_t va4 = wasm_v32x4_load_splat(a4);
-      a4 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index b17c365..0000000
--- a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,303 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
-    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
-    acc += 40;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-      const v128_t va4 = wasm_v128_load(a4);
-      a4 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 7d0d778..0000000
--- a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,303 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
-    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
-    acc += 40;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-      const v128_t va4 = wasm_v128_load(a4);
-      a4 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..c773134
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,209 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
+    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
+    acc += 40;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+      const v128_t va4 = wasm_v32x4_load_splat(a4);
+      a4 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..0f09cb6
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x8inc-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,303 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
+    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
+    acc += 40;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+      const v128_t va4 = wasm_v128_load(a4);
+      a4 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x8s4inc-minmax-sse.c b/src/f32-gemm/gen-inc/5x8s4inc-minmax-sse.c
new file mode 100644
index 0000000..4262bdd
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x8s4inc-minmax-sse.c
@@ -0,0 +1,302 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_5x8s4__sse(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
+    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
+    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
+    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
+    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
+    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
+    __m128 vacc3x0123 = _mm_load_ps(acc + 24);
+    __m128 vacc3x4567 = _mm_load_ps(acc + 28);
+    __m128 vacc4x0123 = _mm_load_ps(acc + 32);
+    __m128 vacc4x4567 = _mm_load_ps(acc + 36);
+    acc += 40;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+      __m128 va3 = _mm_loadu_ps(a3);
+      a3 += 4;
+      __m128 va4 = _mm_loadu_ps(a4);
+      a4 += 4;
+
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+        const __m128 va4 = _mm_load1_ps(a4);
+        a4 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..11169b4
--- /dev/null
+++ b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,236 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
+    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
+    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
+    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
+    acc += 48;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+      const v128_t va4 = wasm_v32x4_load_splat(a4);
+      a4 += 1;
+      const v128_t va5 = wasm_v32x4_load_splat(a5);
+      a5 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..c7cfc25
--- /dev/null
+++ b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,344 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
+    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
+    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
+    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
+    acc += 48;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+      const v128_t va4 = wasm_v128_load(a4);
+      a4 += 4;
+      const v128_t va5 = wasm_v128_load(a5);
+      a5 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+        const v128_t va5 = wasm_v32x4_load_splat(a5);
+        a5 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index 73a7ce8..0000000
--- a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    a5 = a4;
-    c5 = c4;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
-    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
-    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
-    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
-    acc += 48;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-      const v128_t va4 = wasm_v32x4_load_splat(a4);
-      a4 += 1;
-      const v128_t va5 = wasm_v32x4_load_splat(a5);
-      a5 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a5 = (const float*) ((uintptr_t) a5 - kc);
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index fbf70d3..0000000
--- a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    a5 = a4;
-    c5 = c4;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
-    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
-    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
-    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
-    acc += 48;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-      const v128_t va4 = wasm_v32x4_load_splat(a4);
-      a4 += 1;
-      const v128_t va5 = wasm_v32x4_load_splat(a5);
-      a5 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a5 = (const float*) ((uintptr_t) a5 - kc);
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 2bd165f..0000000
--- a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,344 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    a5 = a4;
-    c5 = c4;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
-    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
-    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
-    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
-    acc += 48;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-      const v128_t va4 = wasm_v128_load(a4);
-      a4 += 4;
-      const v128_t va5 = wasm_v128_load(a5);
-      a5 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-        const v128_t va5 = wasm_v32x4_load_splat(a5);
-        a5 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a5 = (const float*) ((uintptr_t) a5 - kc);
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 93522ce..0000000
--- a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,344 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const float*restrict acc,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-  assert(acc != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    a5 = a4;
-    c5 = c4;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
-    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
-    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
-    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
-    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
-    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
-    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
-    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
-    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
-    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
-    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
-    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
-    acc += 48;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-      const v128_t va4 = wasm_v128_load(a4);
-      a4 += 4;
-      const v128_t va5 = wasm_v128_load(a5);
-      a5 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-        const v128_t va5 = wasm_v32x4_load_splat(a5);
-        a5 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a5 = (const float*) ((uintptr_t) a5 - kc);
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..89cbcac
--- /dev/null
+++ b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,236 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
+    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
+    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
+    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
+    acc += 48;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+      const v128_t va4 = wasm_v32x4_load_splat(a4);
+      a4 += 1;
+      const v128_t va5 = wasm_v32x4_load_splat(a5);
+      a5 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
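Note on the clamping in the wasmsimd "x86" kernels added above: the min/max clamp is written as compare + bitselect, whereas the "arm" variants elsewhere in this diff use f32x4 min/max directly. A minimal sketch of the two idioms, assuming a wasm toolchain with -msimd128; the helper names are illustrative and not part of the patch:

// Illustrative sketch only: the two clamping styles used by the wasmsimd
// "x86" and "arm" microkernel variants in this diff. Helper names are ad hoc.
#include <wasm_simd128.h>

// "x86" style: compare + bitselect.
static inline v128_t clamp_bitselect(v128_t vacc, v128_t vmin, v128_t vmax) {
  vacc = wasm_v128_bitselect(vmin, vacc, wasm_f32x4_lt(vacc, vmin));  // take vmin where vacc < vmin
  vacc = wasm_v128_bitselect(vacc, vmax, wasm_f32x4_le(vacc, vmax));  // take vmax where vacc > vmax
  return vacc;
}

// "arm" style: lanewise f32x4 min/max.
static inline v128_t clamp_minmax(v128_t vacc, v128_t vmin, v128_t vmax) {
  return wasm_f32x4_min(wasm_f32x4_max(vacc, vmin), vmax);
}

For ordinary (non-NaN) inputs both forms produce the same clamped result; the split into two variants is a per-target code-generation choice.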
diff --git a/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..b92ea26
--- /dev/null
+++ b/src/f32-gemm/gen-inc/6x8inc-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,344 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const float*restrict acc,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  assert(acc != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
+    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
+    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
+    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
+    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
+    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
+    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
+    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
+    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
+    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
+    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
+    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
+    acc += 48;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+      const v128_t va4 = wasm_v128_load(a4);
+      a4 += 4;
+      const v128_t va5 = wasm_v128_load(a5);
+      a5 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+        const v128_t va5 = wasm_v32x4_load_splat(a5);
+        a5 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
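The "splat" variant above differs from the "loadsplat" variant only in the inner loop: it loads four activations per row at once and broadcasts each lane with a shuffle, falling back to one scalar per step for the k remainder, while loadsplat broadcasts one scalar per k step throughout. A condensed sketch of that loop shape for a single row and four output columns, using the same (older) wasm_simd128.h intrinsic names as the generated sources; the function is illustrative only and omits bias and clamping:

// Illustrative sketch only: "splat" inner-loop shape for 1 row x 4 columns.
// k counts elements here (the generated kernels count bytes).
#include <wasm_simd128.h>
#include <stddef.h>

static void gemm_1x4_splat_sketch(size_t kc, const float* a, const float* w, float* out) {
  v128_t vacc = wasm_f32x4_splat(0.0f);
  size_t k = kc;
  while (k >= 4) {                                   // main loop: 4 k-steps per iteration
    const v128_t va = wasm_v128_load(a); a += 4;     // load 4 activations at once
    vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(wasm_v32x4_shuffle(va, va, 0, 0, 0, 0), wasm_v128_load(w +  0)));
    vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(wasm_v32x4_shuffle(va, va, 1, 1, 1, 1), wasm_v128_load(w +  4)));
    vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(wasm_v32x4_shuffle(va, va, 2, 2, 2, 2), wasm_v128_load(w +  8)));
    vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(wasm_v32x4_shuffle(va, va, 3, 3, 3, 3), wasm_v128_load(w + 12)));
    w += 16;
    k -= 4;
  }
  while (k != 0) {                                   // scalar remainder, as in the loadsplat kernels
    const v128_t vak = wasm_f32x4_splat(*a); a += 1; // broadcast one activation
    vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vak, wasm_v128_load(w)));
    w += 4;
    k -= 1;
  }
  wasm_v128_store(out, vacc);
}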
diff --git a/src/f32-gemm/gen/1x8-minmax-sse2-dup.c b/src/f32-gemm/gen/1x8-minmax-sse2-dup.c
new file mode 100644
index 0000000..fea2811
--- /dev/null
+++ b/src/f32-gemm/gen/1x8-minmax-sse2-dup.c
@@ -0,0 +1,141 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+
+
+      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
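The sse2-dup kernel above broadcasts activation lanes 0-2 through the SSE2 integer shuffle on a bit-cast vector and only lane 3 through _mm_shuffle_ps; the plain sse-dup kernels use the float shuffle throughout. Both idioms side by side, in a minimal sketch with ad hoc helper names:

// Illustrative sketch only: the two lane-broadcast idioms used by the SSE
// "dup" kernels in this diff.
#include <emmintrin.h>

// sse-dup style: broadcast lane 1 with the float shuffle.
static inline __m128 dup_lane1_ps(__m128 va) {
  return _mm_shuffle_ps(va, va, _MM_SHUFFLE(1, 1, 1, 1));
}

// sse2-dup style: broadcast lane 1 with the SSE2 integer shuffle on the
// bit-cast vector.
static inline __m128 dup_lane1_epi32(__m128 va) {
  return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va), _MM_SHUFFLE(1, 1, 1, 1)));
}

Both shuffles move the same bits, so the results are identical; which form runs faster depends on the target microarchitecture.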
diff --git a/src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..c42d393
--- /dev/null
+++ b/src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,99 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
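The 1x8 gemm kernels added here initialize their accumulators from the bias packed at the front of w, while the gemminc kernels earlier in this diff initialize them from the caller-supplied acc buffer (and assert acc != NULL). A minimal sketch of the two setups, with ad hoc helper names:

// Illustrative sketch only: accumulator initialization in gemm vs gemminc.
#include <wasm_simd128.h>

// gemm: accumulators start from the bias packed at the front of w.
static inline v128_t init_from_weights(const float** w) {
  const v128_t vacc = wasm_v128_load(*w);
  *w += 4;
  return vacc;
}

// gemminc: accumulators start from a caller-provided partial-result buffer.
static inline v128_t init_from_acc(const float** acc) {
  const v128_t vacc = wasm_v128_load(*acc);
  *acc += 4;
  return vacc;
}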
diff --git a/src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..2ac1368
--- /dev/null
+++ b/src/f32-gemm/gen/1x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,137 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
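All of the 8-wide kernels in this diff share the same column-tail epilogue: store 4, then 2, then 1 remaining outputs from the low lanes of the accumulator, shifting the surviving lanes down after each step. A condensed sketch of that pattern for one row; the helper name is ad hoc:

// Illustrative sketch only: column-tail stores for nc < 8, one row.
#include <wasm_simd128.h>
#include <stddef.h>

static void store_tail(float* c, size_t nc, v128_t vlo, v128_t vhi) {
  if (nc & 4) {
    wasm_v128_store(c, vlo);                          // store columns 0-3
    vlo = vhi;                                        // remaining columns now live in the old high half
    c += 4;
  }
  if (nc & 2) {
    *((double*) c) = wasm_f64x2_extract_lane(vlo, 0); // store lanes 0-1 as one 64-bit chunk
    vlo = wasm_v32x4_shuffle(vlo, vlo, 2, 3, 2, 3);   // shift lanes 2-3 down
    c += 2;
  }
  if (nc & 1) {
    *c = wasm_f32x4_extract_lane(vlo, 0);             // store the last column
  }
}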
diff --git a/src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index 43175e5..0000000
--- a/src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 2de3550..0000000
--- a/src/f32-gemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index c62afbe..0000000
--- a/src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 5b5f552..0000000
--- a/src/f32-gemm/gen/1x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..7f1fd8d
--- /dev/null
+++ b/src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,99 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..4a3f5d4
--- /dev/null
+++ b/src/f32-gemm/gen/1x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,137 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/3x8-minmax-sse-dup.c b/src/f32-gemm/gen/3x8-minmax-sse-dup.c
new file mode 100644
index 0000000..59210b4
--- /dev/null
+++ b/src/f32-gemm/gen/3x8-minmax-sse-dup.c
@@ -0,0 +1,223 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_3x8__sse_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+
+
+      const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/3x8-minmax-sse-load1.c b/src/f32-gemm/gen/3x8-minmax-sse-load1.c
new file mode 100644
index 0000000..88afe33
--- /dev/null
+++ b/src/f32-gemm/gen/3x8-minmax-sse-load1.c
@@ -0,0 +1,153 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-load1.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_3x8__sse_load1(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const __m128 va0 = _mm_load1_ps(a0);
+      a0 += 1;
+      const __m128 va1 = _mm_load1_ps(a1);
+      a1 += 1;
+      const __m128 va2 = _mm_load1_ps(a2);
+      a2 += 1;
+
+      const __m128 vb0123 = _mm_load_ps(w);
+      const __m128 vb4567 = _mm_load_ps(w + 4);
+      w += 8;
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/3x8-minmax-sse2-dup.c b/src/f32-gemm/gen/3x8-minmax-sse2-dup.c
new file mode 100644
index 0000000..0106d13
--- /dev/null
+++ b/src/f32-gemm/gen/3x8-minmax-sse2-dup.c
@@ -0,0 +1,223 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+
+
+      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..de5e0db
--- /dev/null
+++ b/src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,153 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..4d8cdd5
--- /dev/null
+++ b/src/f32-gemm/gen/3x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,219 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index c5de0b3..0000000
--- a/src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 4ae2122..0000000
--- a/src/f32-gemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index c0a6642..0000000
--- a/src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index b51250f..0000000
--- a/src/f32-gemm/gen/3x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..73e0c47
--- /dev/null
+++ b/src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,153 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..5f48009
--- /dev/null
+++ b/src/f32-gemm/gen/3x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,219 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/3x8s4-minmax-sse.c b/src/f32-gemm/gen/3x8s4-minmax-sse.c
new file mode 100644
index 0000000..d908dff
--- /dev/null
+++ b/src/f32-gemm/gen/3x8s4-minmax-sse.c
@@ -0,0 +1,220 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_3x8s4__sse(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/4x8-minmax-sse2-dup.c b/src/f32-gemm/gen/4x8-minmax-sse2-dup.c
new file mode 100644
index 0000000..1cd6879
--- /dev/null
+++ b/src/f32-gemm/gen/4x8-minmax-sse2-dup.c
@@ -0,0 +1,264 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+      const __m128 va3 = _mm_loadu_ps(a3);
+      a3 += 4;
+
+
+      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..0e01a5f
--- /dev/null
+++ b/src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,180 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..74b6a71
--- /dev/null
+++ b/src/f32-gemm/gen/4x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,260 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index 387d46e..0000000
--- a/src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index fd440c3..0000000
--- a/src/f32-gemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index c14f711..0000000
--- a/src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,260 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 0ab1857..0000000
--- a/src/f32-gemm/gen/4x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,260 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..3ce48dc
--- /dev/null
+++ b/src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,180 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..0dee792
--- /dev/null
+++ b/src/f32-gemm/gen/4x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,260 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/5x8-minmax-sse-dup.c b/src/f32-gemm/gen/5x8-minmax-sse-dup.c
new file mode 100644
index 0000000..0765ab7
--- /dev/null
+++ b/src/f32-gemm/gen/5x8-minmax-sse-dup.c
@@ -0,0 +1,305 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_5x8__sse_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    __m128 vacc4x0123 = vacc0x0123;
+    __m128 vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+      const __m128 va3 = _mm_loadu_ps(a3);
+      a3 += 4;
+      const __m128 va4 = _mm_loadu_ps(a4);
+      a4 += 4;
+
+
+      const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
+      const __m128 va4c0000 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 0, 0, 0));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
+      const __m128 va4c1111 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(1, 1, 1, 1));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
+      const __m128 va4c2222 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(2, 2, 2, 2));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+        const __m128 va4 = _mm_load1_ps(a4);
+        a4 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
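
For reference, the sse-dup kernel above loads four A elements per iteration and broadcasts each lane with _mm_shuffle_ps before multiplying it against two packed B vectors (eight output columns). Below is a minimal standalone sketch of that inner loop for a single row (MR=1, NR=8); it is not part of the generated sources, and the toy buffers, sizes and values are illustrative assumptions only.

/* Sketch of the "dup" main loop: 4 k-steps per iteration, 8 output columns. */
#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  const float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};   /* 4 k-steps of one A row */
  _Alignas(16) float w[32];                      /* packed B: 8 floats per k-step */
  for (int i = 0; i < 32; i++) w[i] = 0.5f * (float) i;

  __m128 vacc0123 = _mm_setzero_ps();
  __m128 vacc4567 = _mm_setzero_ps();

  const __m128 va = _mm_loadu_ps(a);             /* one load covers 4 k-steps */
  for (int k = 0; k < 4; k++) {
    /* Broadcast lane k of va; the generated kernel unrolls this with
       _MM_SHUFFLE(k, k, k, k) for k = 0..3. */
    __m128 vak;
    switch (k) {
      case 0:  vak = _mm_shuffle_ps(va, va, _MM_SHUFFLE(0, 0, 0, 0)); break;
      case 1:  vak = _mm_shuffle_ps(va, va, _MM_SHUFFLE(1, 1, 1, 1)); break;
      case 2:  vak = _mm_shuffle_ps(va, va, _MM_SHUFFLE(2, 2, 2, 2)); break;
      default: vak = _mm_shuffle_ps(va, va, _MM_SHUFFLE(3, 3, 3, 3)); break;
    }
    const __m128 vb0123 = _mm_load_ps(w + 8 * k);
    const __m128 vb4567 = _mm_load_ps(w + 8 * k + 4);
    vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vak, vb0123));
    vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(vak, vb4567));
  }

  float c[8];
  _mm_storeu_ps(c, vacc0123);
  _mm_storeu_ps(c + 4, vacc4567);
  for (int n = 0; n < 8; n++) printf("%g ", c[n]);
  printf("\n");
  return 0;
}
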
diff --git a/src/f32-gemm/gen/5x8-minmax-sse-load1.c b/src/f32-gemm/gen/5x8-minmax-sse-load1.c
new file mode 100644
index 0000000..9320d65
--- /dev/null
+++ b/src/f32-gemm/gen/5x8-minmax-sse-load1.c
@@ -0,0 +1,207 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-load1.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_5x8__sse_load1(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    __m128 vacc4x0123 = vacc0x0123;
+    __m128 vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const __m128 va0 = _mm_load1_ps(a0);
+      a0 += 1;
+      const __m128 va1 = _mm_load1_ps(a1);
+      a1 += 1;
+      const __m128 va2 = _mm_load1_ps(a2);
+      a2 += 1;
+      const __m128 va3 = _mm_load1_ps(a3);
+      a3 += 1;
+      const __m128 va4 = _mm_load1_ps(a4);
+      a4 += 1;
+
+      const __m128 vb0123 = _mm_load_ps(w);
+      const __m128 vb4567 = _mm_load_ps(w + 4);
+      w += 8;
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
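
The sse-load1 kernel above differs from the dup variant only in how A is read: each k-step broadcasts a single element with _mm_load1_ps, so the kernel needs no 4x-unrolled main loop or remainder handling (the dup kernel falls back to exactly this per-element form in its tail). A minimal standalone sketch of one such k-step, with illustrative toy values:

#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  const float a = 2.0f;                               /* one A element */
  _Alignas(16) float w[8] = {0, 1, 2, 3, 4, 5, 6, 7}; /* 8 packed B values */

  __m128 vacc0123 = _mm_setzero_ps();
  __m128 vacc4567 = _mm_setzero_ps();

  const __m128 va = _mm_load1_ps(&a);                 /* splat a to all 4 lanes */
  vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(va, _mm_load_ps(w)));
  vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(va, _mm_load_ps(w + 4)));

  float c[8];
  _mm_storeu_ps(c, vacc0123);
  _mm_storeu_ps(c + 4, vacc4567);
  for (int n = 0; n < 8; n++) printf("%g ", c[n]);    /* 0 2 4 6 8 10 12 14 */
  printf("\n");
  return 0;
}
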
diff --git a/src/f32-gemm/gen/5x8-minmax-sse2-dup.c b/src/f32-gemm/gen/5x8-minmax-sse2-dup.c
new file mode 100644
index 0000000..48df030
--- /dev/null
+++ b/src/f32-gemm/gen/5x8-minmax-sse2-dup.c
@@ -0,0 +1,305 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    __m128 vacc4x0123 = vacc0x0123;
+    __m128 vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      const __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      const __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+      const __m128 va3 = _mm_loadu_ps(a3);
+      a3 += 4;
+      const __m128 va4 = _mm_loadu_ps(a4);
+      a4 += 4;
+
+
+      const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
+      const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
+
+      const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
+      const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
+
+      const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
+      const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
+
+      const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+      const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+        const __m128 va4 = _mm_load1_ps(a4);
+        a4 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
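
The sse2-dup kernel above runs the same algorithm as the sse-dup kernel, but broadcasts lanes 0-2 through the integer domain (_mm_shuffle_epi32 on a bit-cast register) and only lane 3 with _mm_shuffle_ps; presumably this is a scheduling choice of the generator, since both forms reproduce the float bits exactly. A standalone sketch showing the two broadcasts side by side (toy values, SSE2 only):

#include <stdio.h>
#include <emmintrin.h>

static __m128 broadcast_lane0_shufps(__m128 v) {
  return _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0));
}

static __m128 broadcast_lane0_pshufd(__m128 v) {
  /* Bit-cast to integer, shuffle, bit-cast back; no arithmetic is performed,
     so the float bit patterns are preserved exactly. */
  return _mm_castsi128_ps(
      _mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(0, 0, 0, 0)));
}

int main(void) {
  const __m128 v = _mm_setr_ps(1.5f, 2.5f, 3.5f, 4.5f);
  float x[4], y[4];
  _mm_storeu_ps(x, broadcast_lane0_shufps(v));
  _mm_storeu_ps(y, broadcast_lane0_pshufd(v));
  printf("%g %g %g %g | %g %g %g %g\n",
         x[0], x[1], x[2], x[3], y[0], y[1], y[2], y[3]);
  return 0;
}
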
diff --git a/src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..e7d711b
--- /dev/null
+++ b/src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,207 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+      const v128_t va4 = wasm_v32x4_load_splat(a4);
+      a4 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
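
The wasmsimd "arm" kernel above clamps its accumulators with wasm_f32x4_max/wasm_f32x4_min, whereas other wasmsimd kernels in this patch use compare + wasm_v128_bitselect for the same [min, max] clamp. For finite inputs the two sequences produce identical results; the split into arm/x86 flavors is presumably about which sequence lowers better on each engine (that rationale is an assumption, not stated in the patch). A minimal sketch of both forms (requires a WebAssembly SIMD toolchain, e.g. clang with -msimd128):

#include <wasm_simd128.h>

/* "arm" flavor: plain max/min, as in the kernel above. */
v128_t clamp_minmax(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_f32x4_max(v, vmin);
  return wasm_f32x4_min(v, vmax);
}

/* "x86" flavor: compare + bitselect, as in the other wasmsimd kernels
   in this patch. */
v128_t clamp_bitselect(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_v128_bitselect(vmin, v, wasm_f32x4_lt(v, vmin));    /* lower bound */
  return wasm_v128_bitselect(v, vmax, wasm_f32x4_le(v, vmax)); /* upper bound */
}
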
diff --git a/src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..b00fcdb
--- /dev/null
+++ b/src/f32-gemm/gen/5x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,301 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+      const v128_t va4 = wasm_v128_load(a4);
+      a4 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index 69357f8..0000000
--- a/src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-      const v128_t va4 = wasm_v32x4_load_splat(a4);
-      a4 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 84f197f..0000000
--- a/src/f32-gemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-      const v128_t va4 = wasm_v32x4_load_splat(a4);
-      a4 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 848e047..0000000
--- a/src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-      const v128_t va4 = wasm_v128_load(a4);
-      a4 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 05136f1..0000000
--- a/src/f32-gemm/gen/5x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-      const v128_t va4 = wasm_v128_load(a4);
-      a4 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..9f89dad
--- /dev/null
+++ b/src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,207 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+      const v128_t va4 = wasm_v32x4_load_splat(a4);
+      a4 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
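The arm- and x86-suffixed wasmsimd kernels in this diff compute the same GEMM; they differ mainly in how the minmax clamp is expressed. The arm variants call wasm_f32x4_max/wasm_f32x4_min directly, while the x86 variants build the clamp from wasm_f32x4_lt/wasm_f32x4_le comparisons plus wasm_v128_bitselect, which tends to lower to cheaper SSE code. A minimal standalone sketch of the two idioms, using hypothetical helper names (clamp_arm, clamp_x86) that do not appear in the sources:

#include <wasm_simd128.h>

// arm-style clamp: rely on native f32x4 min/max, as in the *-arm kernels above.
static inline v128_t clamp_arm(v128_t vacc, v128_t vmin, v128_t vmax) {
  vacc = wasm_f32x4_max(vacc, vmin);
  return wasm_f32x4_min(vacc, vmax);
}

// x86-style clamp: explicit comparisons feeding bitselect, as in the *-x86 kernels above.
static inline v128_t clamp_x86(v128_t vacc, v128_t vmin, v128_t vmax) {
  vacc = wasm_v128_bitselect(vmin, vacc, wasm_f32x4_lt(vacc, vmin));
  return wasm_v128_bitselect(vacc, vmax, wasm_f32x4_le(vacc, vmax));
}

Both helpers clamp each lane of vacc to [vmin, vmax]; only the instruction selection differs.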
diff --git a/src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..613760a
--- /dev/null
+++ b/src/f32-gemm/gen/5x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,301 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+      const v128_t va4 = wasm_v128_load(a4);
+      a4 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/5x8s4-minmax-sse.c b/src/f32-gemm/gen/5x8s4-minmax-sse.c
new file mode 100644
index 0000000..fe51927
--- /dev/null
+++ b/src/f32-gemm/gen/5x8s4-minmax-sse.c
@@ -0,0 +1,300 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/sse-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_5x8s4__sse(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w + 0);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    __m128 vacc4x0123 = vacc0x0123;
+    __m128 vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      __m128 va0 = _mm_loadu_ps(a0);
+      a0 += 4;
+      __m128 va1 = _mm_loadu_ps(a1);
+      a1 += 4;
+      __m128 va2 = _mm_loadu_ps(a2);
+      a2 += 4;
+      __m128 va3 = _mm_loadu_ps(a3);
+      a3 += 4;
+      __m128 va4 = _mm_loadu_ps(a4);
+      a4 += 4;
+
+
+      const __m128 vb0123c0 = _mm_load_ps(w + 0);
+      const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c1 = _mm_load_ps(w + 8);
+      const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c2 = _mm_load_ps(w + 16);
+      const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));
+
+      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+      const __m128 vb0123c3 = _mm_load_ps(w + 24);
+      const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
+      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
+      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
+      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
+      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
+      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
+      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
+      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
+      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
+      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));
+
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+        const __m128 va4 = _mm_load1_ps(a4);
+        a4 += 1;
+
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..2cb3a53
--- /dev/null
+++ b/src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,234 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    v128_t vacc5x0123 = vacc0x0123;
+    v128_t vacc5x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+      const v128_t va4 = wasm_v32x4_load_splat(a4);
+      a4 += 1;
+      const v128_t va5 = wasm_v32x4_load_splat(a5);
+      a5 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-splat.c b/src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..5e1a889
--- /dev/null
+++ b/src/f32-gemm/gen/6x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,342 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    v128_t vacc5x0123 = vacc0x0123;
+    v128_t vacc5x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+      const v128_t va4 = wasm_v128_load(a4);
+      a4 += 4;
+      const v128_t va5 = wasm_v128_load(a5);
+      a5 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+        const v128_t va5 = wasm_v32x4_load_splat(a5);
+        a5 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index d50ebac..0000000
--- a/src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,234 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    a5 = a4;
-    c5 = c4;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    v128_t vacc5x0123 = vacc0x0123;
-    v128_t vacc5x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-      const v128_t va4 = wasm_v32x4_load_splat(a4);
-      a4 += 1;
-      const v128_t va5 = wasm_v32x4_load_splat(a5);
-      a5 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a5 = (const float*) ((uintptr_t) a5 - kc);
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 4795c09..0000000
--- a/src/f32-gemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,234 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    a5 = a4;
-    c5 = c4;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    v128_t vacc5x0123 = vacc0x0123;
-    v128_t vacc5x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0 = wasm_v32x4_load_splat(a0);
-      a0 += 1;
-      const v128_t va1 = wasm_v32x4_load_splat(a1);
-      a1 += 1;
-      const v128_t va2 = wasm_v32x4_load_splat(a2);
-      a2 += 1;
-      const v128_t va3 = wasm_v32x4_load_splat(a3);
-      a3 += 1;
-      const v128_t va4 = wasm_v32x4_load_splat(a4);
-      a4 += 1;
-      const v128_t va5 = wasm_v32x4_load_splat(a5);
-      a5 += 1;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a5 = (const float*) ((uintptr_t) a5 - kc);
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-arm.c b/src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 0064182..0000000
--- a/src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,342 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    a5 = a4;
-    c5 = c4;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    v128_t vacc5x0123 = vacc0x0123;
-    v128_t vacc5x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-      const v128_t va4 = wasm_v128_load(a4);
-      a4 += 4;
-      const v128_t va5 = wasm_v128_load(a5);
-      a5 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-        const v128_t va5 = wasm_v32x4_load_splat(a5);
-        a5 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a5 = (const float*) ((uintptr_t) a5 - kc);
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-x86.c b/src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index fa7e617..0000000
--- a/src/f32-gemm/gen/6x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,342 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-gemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/gemm.h>
-
-
-void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    const float*restrict a,
-    size_t a_stride,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  const float* a0 = a;
-  float* c0 = c;
-  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    a1 = a0;
-    c1 = c0;
-  }
-  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    a2 = a1;
-    c2 = c1;
-  }
-  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    a3 = a2;
-    c3 = c2;
-  }
-  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    a4 = a3;
-    c4 = c3;
-  }
-  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    a5 = a4;
-    c5 = c4;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w + 0);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    v128_t vacc5x0123 = vacc0x0123;
-    v128_t vacc5x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    while (k >= 4 * sizeof(float)) {
-      const v128_t va0 = wasm_v128_load(a0);
-      a0 += 4;
-      const v128_t va1 = wasm_v128_load(a1);
-      a1 += 4;
-      const v128_t va2 = wasm_v128_load(a2);
-      a2 += 4;
-      const v128_t va3 = wasm_v128_load(a3);
-      a3 += 4;
-      const v128_t va4 = wasm_v128_load(a4);
-      a4 += 4;
-      const v128_t va5 = wasm_v128_load(a5);
-      a5 += 4;
-
-      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
-
-      const v128_t vb0123c0 = wasm_v128_load(w + 0);
-      const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
-      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
-
-      const v128_t vb0123c1 = wasm_v128_load(w + 8);
-      const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
-      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
-
-      const v128_t vb0123c2 = wasm_v128_load(w + 16);
-      const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
-      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
-
-      const v128_t vb0123c3 = wasm_v128_load(w + 24);
-      const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
-
-      w += 32;
-      k -= 4 * sizeof(float);
-    }
-    if XNN_UNLIKELY(k != 0) {
-      do {
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-        const v128_t va5 = wasm_v32x4_load_splat(a5);
-        a5 += 1;
-
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-
-        k -= sizeof(float);
-      } while (k != 0);
-    }
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a5 = (const float*) ((uintptr_t) a5 - kc);
-      a4 = (const float*) ((uintptr_t) a4 - kc);
-      a3 = (const float*) ((uintptr_t) a3 - kc);
-      a2 = (const float*) ((uintptr_t) a2 - kc);
-      a1 = (const float*) ((uintptr_t) a1 - kc);
-      a0 = (const float*) ((uintptr_t) a0 - kc);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..b941ba3
--- /dev/null
+++ b/src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,234 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    v128_t vacc5x0123 = vacc0x0123;
+    v128_t vacc5x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    do {
+      const v128_t va0 = wasm_v32x4_load_splat(a0);
+      a0 += 1;
+      const v128_t va1 = wasm_v32x4_load_splat(a1);
+      a1 += 1;
+      const v128_t va2 = wasm_v32x4_load_splat(a2);
+      a2 += 1;
+      const v128_t va3 = wasm_v32x4_load_splat(a3);
+      a3 += 1;
+      const v128_t va4 = wasm_v32x4_load_splat(a4);
+      a4 += 1;
+      const v128_t va5 = wasm_v32x4_load_splat(a5);
+      a5 += 1;
+
+      const v128_t vb0123 = wasm_v128_load(w);
+      const v128_t vb4567 = wasm_v128_load(w + 4);
+      w += 8;
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+
+      k -= sizeof(float);
+    } while (k != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-splat.c b/src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..912a574
--- /dev/null
+++ b/src/f32-gemm/gen/6x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,342 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-gemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const float*restrict a,
+    size_t a_stride,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const float* a0 = a;
+  float* c0 = c;
+  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w + 0);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    v128_t vacc5x0123 = vacc0x0123;
+    v128_t vacc5x4567 = vacc0x4567;
+    w += 8;
+
+    size_t k = kc;
+    while (k >= 4 * sizeof(float)) {
+      const v128_t va0 = wasm_v128_load(a0);
+      a0 += 4;
+      const v128_t va1 = wasm_v128_load(a1);
+      a1 += 4;
+      const v128_t va2 = wasm_v128_load(a2);
+      a2 += 4;
+      const v128_t va3 = wasm_v128_load(a3);
+      a3 += 4;
+      const v128_t va4 = wasm_v128_load(a4);
+      a4 += 4;
+      const v128_t va5 = wasm_v128_load(a5);
+      a5 += 4;
+
+      const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+      const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+      const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+      const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+      const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
+
+      const v128_t vb0123c0 = wasm_v128_load(w + 0);
+      const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
+      const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+      const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+      const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+      const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+      const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+      const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
+
+      const v128_t vb0123c1 = wasm_v128_load(w + 8);
+      const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
+      const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+      const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+      const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+      const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
+
+      const v128_t vb0123c2 = wasm_v128_load(w + 16);
+      const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
+      const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+      const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+      const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+      const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+      const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+      const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
+
+      const v128_t vb0123c3 = wasm_v128_load(w + 24);
+      const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
+      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
+
+      w += 32;
+      k -= 4 * sizeof(float);
+    }
+    if XNN_UNLIKELY(k != 0) {
+      do {
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+        const v128_t va5 = wasm_v32x4_load_splat(a5);
+        a5 += 1;
+
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+
+        k -= sizeof(float);
+      } while (k != 0);
+    }
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a5 = (const float*) ((uintptr_t) a5 - kc);
+      a4 = (const float*) ((uintptr_t) a4 - kc);
+      a3 = (const float*) ((uintptr_t) a3 - kc);
+      a2 = (const float*) ((uintptr_t) a2 - kc);
+      a1 = (const float*) ((uintptr_t) a1 - kc);
+      a0 = (const float*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
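
Note on the clamping style in the kernel above: the compare + wasm_v128_bitselect sequence is the _x86 flavour of the min/max activation, while the _arm flavour of the same kernels later in this patch calls wasm_f32x4_max/wasm_f32x4_min directly, presumably because those lower to single NEON fmin/fmax instructions but need extra NaN-handling code when lowered to SSE. A minimal sketch of the two equivalent clamps (helper names are illustrative, not part of XNNPACK):

#include <wasm_simd128.h>

// _arm style: rely on f32x4.min/max directly.
static inline v128_t clamp_arm(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_f32x4_max(v, vmin);
  return wasm_f32x4_min(v, vmax);
}

// _x86 style: compare, then pick lanes with a bitselect.
static inline v128_t clamp_x86(v128_t v, v128_t vmin, v128_t vmax) {
  // where v < vmin take vmin, else keep v
  v = wasm_v128_bitselect(vmin, v, wasm_f32x4_lt(v, vmin));
  // where v <= vmax keep v, else take vmax
  return wasm_v128_bitselect(v, vmax, wasm_f32x4_le(v, vmax));
}

Both forms produce the same clamped lanes for non-NaN inputs; the generated kernels simply pick whichever form the target lowers more cheaply.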
diff --git a/src/f32-gemm/sse-dup.c.in b/src/f32-gemm/sse-dup.c.in
index ee2e041..98a3bb7 100644
--- a/src/f32-gemm/sse-dup.c.in
+++ b/src/f32-gemm/sse-dup.c.in
@@ -5,14 +5,16 @@
 
 $assert NR % 4 == 0
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$SSE_HEADER = {1: "xmmintrin.h", 2: "emmintrin.h"}[SSE]
 #include <assert.h>
 
-#include <xmmintrin.h>
+#include <${SSE_HEADER}>
 
 #include <xnnpack/gemm.h>
 
 
-void xnn_f32_gemm${"inc" if INC else ""}_minmax_ukernel_${MR}x${NR}__sse_dup(
+$ISA = {1: "sse", 2: "sse2"}[SSE]
+void xnn_f32_gemm${"inc" if INC else ""}_minmax_ukernel_${MR}x${NR}__${ISA}_dup(
     size_t mr,
     size_t nc,
     size_t kc,
@@ -82,7 +84,10 @@
         $LLLL = str(L) * 4
 
         $for M in range(MR):
-          const __m128 va${M}c${LLLL} = _mm_shuffle_ps(va${M}, va${M}, _MM_SHUFFLE(${L}, ${L}, ${L}, ${L}));
+          $if SSE >= 2 and L < 3:
+            const __m128 va${M}c${LLLL} = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va${M}), _MM_SHUFFLE(${L}, ${L}, ${L}, ${L})));
+          $else:
+            const __m128 va${M}c${LLLL} = _mm_shuffle_ps(va${M}, va${M}, _MM_SHUFFLE(${L}, ${L}, ${L}, ${L}));
 
         $for N in range(0, NR, 4):
           const __m128 vb${ABC[N:N+4]}c${L} = _mm_load_ps(w + ${L * NR + N});
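
This template change is why the generated SSE2 "dup" kernels later in this patch broadcast lanes 0-2 through the integer domain: _mm_shuffle_epi32 takes a single source register, whereas the SSE1 _mm_shuffle_ps form reads the same register twice and, without AVX, is destructive, so the cast-shuffle-cast sequence likely saves a register copy. A small standalone check (illustrative only, not generated code) that both forms broadcast the same lane:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  const __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
  // SSE1 form: two-operand float shuffle of v with itself.
  const __m128 fdup = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2));
  // SSE2 form: one-register integer shuffle, bitcast back to float.
  const __m128 idup = _mm_castsi128_ps(
      _mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(2, 2, 2, 2)));
  float fd[4], id[4];
  _mm_storeu_ps(fd, fdup);
  _mm_storeu_ps(id, idup);
  printf("%g %g %g %g | %g %g %g %g\n",
         fd[0], fd[1], fd[2], fd[3], id[0], id[1], id[2], id[3]);
  return 0;
}

This prints 3 3 3 3 | 3 3 3 3; the template keeps _mm_shuffle_ps for lane 3 and for the plain SSE variant.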
diff --git a/src/f32-gemm/wasmsimd-loadsplat.c.in b/src/f32-gemm/wasmsimd-loadsplat.c.in
index 81a0514..6e8cf24 100644
--- a/src/f32-gemm/wasmsimd-loadsplat.c.in
+++ b/src/f32-gemm/wasmsimd-loadsplat.c.in
@@ -16,7 +16,7 @@
 $ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
 $ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
-void xnn_f32_gemm${"inc" if INC else ""}${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd_loadsplat${ARCH_SUFFIX}(
+void xnn_f32_gemm${"inc" if INC else ""}${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd${ARCH_SUFFIX}_loadsplat(
     size_t mr,
     size_t nc,
     size_t kc,
diff --git a/src/f32-gemm/wasmsimd-splat.c.in b/src/f32-gemm/wasmsimd-splat.c.in
index 983a3dc..c3da311 100644
--- a/src/f32-gemm/wasmsimd-splat.c.in
+++ b/src/f32-gemm/wasmsimd-splat.c.in
@@ -16,7 +16,7 @@
 $ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
 $ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
-void xnn_f32_gemm${"inc" if INC else ""}${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd_splat${ARCH_SUFFIX}(
+void xnn_f32_gemm${"inc" if INC else ""}${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd${ARCH_SUFFIX}_splat(
     size_t mr,
     size_t nc,
     size_t kc,
diff --git a/src/f32-igemm/gen/1x8-minmax-sse2-dup.c b/src/f32-igemm/gen/1x8-minmax-sse2-dup.c
new file mode 100644
index 0000000..f2996c7
--- /dev/null
+++ b/src/f32-igemm/gen/1x8-minmax-sse2-dup.c
@@ -0,0 +1,154 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const __m128 va0 = _mm_loadu_ps(a0);
+        a0 += 4;
+
+
+        const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+
+        const __m128 vb0123c0 = _mm_load_ps(w + 0);
+        const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+
+        const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+
+        const __m128 vb0123c1 = _mm_load_ps(w + 8);
+        const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+
+        const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+
+        const __m128 vb0123c2 = _mm_load_ps(w + 16);
+        const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+
+        const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+
+        const __m128 vb0123c3 = _mm_load_ps(w + 24);
+        const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const __m128 vb0123 = _mm_load_ps(w);
+          const __m128 vb4567 = _mm_load_ps(w + 4);
+          w += 8;
+
+          const __m128 va0 = _mm_load1_ps(a0);
+          a0 += 1;
+
+          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
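
The IGEMM kernels in this patch all share the indirection-buffer prologue seen above: a is an array of ks / sizeof(void*) row pointers per tile, rows that fall into padding point at zero (and are deliberately not rebased), and real rows get a_offset added before use. A minimal sketch of that pointer walk, with hypothetical names and no actual compute (not an XNNPACK API):

#include <stddef.h>
#include <stdint.h>

static void walk_indirection(const float** a, size_t ks, size_t a_offset,
                             const float* zero) {
  size_t p = ks;  // ks is in bytes, a multiple of sizeof(void*)
  do {
    const float* a0 = a[0];
    if (a0 != zero) {
      // only real input rows are rebased by the batch offset
      a0 = (const float*) ((uintptr_t) a0 + a_offset);
    }
    // ... a kernel would consume kc bytes of floats from a0 here ...
    (void) a0;
    a += 1;
    p -= sizeof(void*);
  } while (p != 0);
}

After a tile is written out, the kernels rewind with a = (const float**) ((uintptr_t) a - ks) so the same pointer list can be reused for the next column block.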
diff --git a/src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..50b5a81
--- /dev/null
+++ b/src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,112 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-splat.c b/src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..ecc8864
--- /dev/null
+++ b/src/f32-igemm/gen/1x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,150 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index f4af5f4..0000000
--- a/src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,112 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (1 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      a += 1;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 1 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index bf1928b..0000000
--- a/src/f32-igemm/gen/1x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,112 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (1 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      a += 1;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 1 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-arm.c b/src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 09d157c..0000000
--- a/src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (1 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      a += 1;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 1 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-x86.c b/src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index b83d203..0000000
--- a/src/f32-igemm/gen/1x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 1);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (1 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      a += 1;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 1 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc0x0123 = vacc0x4567;
-
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..24f5441
--- /dev/null
+++ b/src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,112 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-splat.c b/src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..b192a66
--- /dev/null
+++ b/src/f32-igemm/gen/1x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,150 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc0x0123 = vacc0x4567;
+
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/3x8-minmax-sse-dup.c b/src/f32-igemm/gen/3x8-minmax-sse-dup.c
new file mode 100644
index 0000000..bce36c6
--- /dev/null
+++ b/src/f32-igemm/gen/3x8-minmax-sse-dup.c
@@ -0,0 +1,240 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_3x8__sse_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const __m128 va0 = _mm_loadu_ps(a0);
+        a0 += 4;
+        const __m128 va1 = _mm_loadu_ps(a1);
+        a1 += 4;
+        const __m128 va2 = _mm_loadu_ps(a2);
+        a2 += 4;
+
+
+        const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
+        const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
+        const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
+
+        const __m128 vb0123c0 = _mm_load_ps(w + 0);
+        const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+
+        const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
+        const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
+        const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
+
+        const __m128 vb0123c1 = _mm_load_ps(w + 8);
+        const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+
+        const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
+        const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
+        const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
+
+        const __m128 vb0123c2 = _mm_load_ps(w + 16);
+        const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+
+        const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+
+        const __m128 vb0123c3 = _mm_load_ps(w + 24);
+        const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const __m128 vb0123 = _mm_load_ps(w);
+          const __m128 vb4567 = _mm_load_ps(w + 4);
+          w += 8;
+
+          const __m128 va0 = _mm_load1_ps(a0);
+          a0 += 1;
+          const __m128 va1 = _mm_load1_ps(a1);
+          a1 += 1;
+          const __m128 va2 = _mm_load1_ps(a2);
+          a2 += 1;
+
+          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/3x8-minmax-sse-load1.c b/src/f32-igemm/gen/3x8-minmax-sse-load1.c
new file mode 100644
index 0000000..83958fa
--- /dev/null
+++ b/src/f32-igemm/gen/3x8-minmax-sse-load1.c
@@ -0,0 +1,170 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-load1.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_3x8__sse_load1(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      do {
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
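
For reference, the kernel above computes one 3x8 tile of an indirect GEMM: `a` is an indirection buffer holding `ks / sizeof(void*)` input-row pointers consumed three at a time, rows equal to `zero` skip the `a_offset` adjustment, and each accumulator is seeded from the 8 packed bias values at the head of `w`. The scalar sketch below is illustrative only (it is not part of this patch, and all names in it are hypothetical); it spells out the same computation without SIMD and writes a single nc <= 8 tile directly instead of using the vector store cascade.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical scalar reference for one 3x8 indirect-GEMM tile with min/max clamping.
// Strides and offsets are in bytes, as in the kernel; w holds 8 packed biases followed
// by, for every group of 3 row pointers, kc/sizeof(float) rows of 8 packed weights.
static void reference_igemm_3x8(
    size_t mr, size_t nc, size_t kc, size_t ks,
    const float** a, const float* w, float* c, size_t cm_stride,
    size_t a_offset, const float* zero, float min, float max)
{
  assert(mr != 0 && mr <= 3);
  assert(nc != 0 && nc <= 8);
  for (size_t m = 0; m < mr; m++) {
    float* cm = (float*) ((uintptr_t) c + m * cm_stride);
    for (size_t n = 0; n < nc; n++) {
      float acc = w[n];  // packed bias
      const float* wk = w + 8;
      for (size_t p = 0; p < ks / sizeof(void*); p += 3) {
        const float* am = a[p + m];
        if (am != zero) {
          am = (const float*) ((uintptr_t) am + a_offset);
        }
        for (size_t k = 0; k < kc / sizeof(float); k++) {
          acc += am[k] * wk[k * 8 + n];
        }
        wk += (kc / sizeof(float)) * 8;
      }
      acc = acc > max ? max : acc;  // same clamp as the SIMD kernel's min/max pair
      acc = acc < min ? min : acc;
      cm[n] = acc;
    }
  }
}
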
diff --git a/src/f32-igemm/gen/3x8-minmax-sse2-dup.c b/src/f32-igemm/gen/3x8-minmax-sse2-dup.c
new file mode 100644
index 0000000..ccf2406
--- /dev/null
+++ b/src/f32-igemm/gen/3x8-minmax-sse2-dup.c
@@ -0,0 +1,240 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const __m128 va0 = _mm_loadu_ps(a0);
+        a0 += 4;
+        const __m128 va1 = _mm_loadu_ps(a1);
+        a1 += 4;
+        const __m128 va2 = _mm_loadu_ps(a2);
+        a2 += 4;
+
+
+        const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+
+        const __m128 vb0123c0 = _mm_load_ps(w + 0);
+        const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+
+        const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+
+        const __m128 vb0123c1 = _mm_load_ps(w + 8);
+        const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+
+        const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+
+        const __m128 vb0123c2 = _mm_load_ps(w + 16);
+        const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+
+        const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+
+        const __m128 vb0123c3 = _mm_load_ps(w + 24);
+        const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const __m128 vb0123 = _mm_load_ps(w);
+          const __m128 vb4567 = _mm_load_ps(w + 4);
+          w += 8;
+
+          const __m128 va0 = _mm_load1_ps(a0);
+          a0 += 1;
+          const __m128 va1 = _mm_load1_ps(a1);
+          a1 += 1;
+          const __m128 va2 = _mm_load1_ps(a2);
+          a2 += 1;
+
+          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
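
The `sse2_dup` variant above unrolls the reduction over `kc` by four: it loads four consecutive A values per row and broadcasts each lane before the multiply-adds, using `_mm_shuffle_epi32` through the integer domain (a single `pshufd` on SSE2) for lanes 0-2 and `_mm_shuffle_ps` for lane 3. The stand-alone snippet below (illustrative only, not part of this patch) shows that both broadcast forms produce the same splat.

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  const __m128 va = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);

  // Lane 1 broadcast through the integer domain, as the kernel does for lanes 0-2.
  const __m128 c1_epi = _mm_castsi128_ps(
      _mm_shuffle_epi32(_mm_castps_si128(va), _MM_SHUFFLE(1, 1, 1, 1)));
  // Lane 3 broadcast with shufps, as the kernel does for the last lane.
  const __m128 c3_ps = _mm_shuffle_ps(va, va, _MM_SHUFFLE(3, 3, 3, 3));

  float out[8];
  _mm_storeu_ps(out, c1_epi);
  _mm_storeu_ps(out + 4, c3_ps);
  printf("%g %g %g %g | %g %g %g %g\n",
         out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7]);
  // Expected: 2 2 2 2 | 4 4 4 4
  return 0;
}
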
diff --git a/src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..a4e4041
--- /dev/null
+++ b/src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,170 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
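
The wasmsimd kernels in this patch come in `arm` and `x86` flavors that differ only in how the output is clamped: the `arm` variant above uses `wasm_f32x4_max`/`wasm_f32x4_min` directly, while the `x86` variants (see the loadsplat/splat x86 files later in this diff) express the same clamp with comparisons and `wasm_v128_bitselect`. A minimal side-by-side sketch of the two forms, for illustration only:

#include <wasm_simd128.h>

// Illustrative only: both clamp styles used by the wasmsimd "arm" and "x86"
// kernel variants produce the same result for non-NaN accumulators.
static v128_t clamp_arm_style(v128_t vacc, v128_t vmin, v128_t vmax) {
  vacc = wasm_f32x4_max(vacc, vmin);
  return wasm_f32x4_min(vacc, vmax);
}

static v128_t clamp_x86_style(v128_t vacc, v128_t vmin, v128_t vmax) {
  // Select vmin where vacc < vmin, then keep vacc only where vacc <= vmax.
  vacc = wasm_v128_bitselect(vmin, vacc, wasm_f32x4_lt(vacc, vmin));
  return wasm_v128_bitselect(vacc, vmax, wasm_f32x4_le(vacc, vmax));
}
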
diff --git a/src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-splat.c b/src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..a2467f2
--- /dev/null
+++ b/src/f32-igemm/gen/3x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,236 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+        const v128_t va1 = wasm_v128_load(a1);
+        a1 += 4;
+        const v128_t va2 = wasm_v128_load(a2);
+        a2 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+          const v128_t va1 = wasm_v32x4_load_splat(a1);
+          a1 += 1;
+          const v128_t va2 = wasm_v32x4_load_splat(a2);
+          a2 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
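
All of these kernels share the same store tail: when fewer than 8 columns remain, the `nc & 4`, `nc & 2`, `nc & 1` cascade writes exactly `nc` floats per row, shifting the upper accumulator lanes down after each partial store. A scalar equivalent, shown only to make the bit-trick explicit (not part of this patch):

#include <stddef.h>
#include <string.h>

// Writes the low nc (< 8) elements of an 8-wide accumulator row; mirrors the
// nc & 4 / nc & 2 / nc & 1 store cascade used in the kernels above.
static void store_row_tail(float* c, const float acc[8], size_t nc) {
  size_t i = 0;
  if (nc & 4) { memcpy(c, acc + i, 4 * sizeof(float)); c += 4; i += 4; }
  if (nc & 2) { memcpy(c, acc + i, 2 * sizeof(float)); c += 2; i += 2; }
  if (nc & 1) { *c = acc[i]; }
}
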
diff --git a/src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index b156bba..0000000
--- a/src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,170 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (3 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      a += 3;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 3 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 64d23dd..0000000
--- a/src/f32-igemm/gen/3x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,170 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (3 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      a += 3;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 3 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-arm.c b/src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 64c3e07..0000000
--- a/src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (3 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      a += 3;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-        const v128_t va1 = wasm_v128_load(a1);
-        a1 += 4;
-        const v128_t va2 = wasm_v128_load(a2);
-        a2 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-          const v128_t va1 = wasm_v32x4_load_splat(a1);
-          a1 += 1;
-          const v128_t va2 = wasm_v32x4_load_splat(a2);
-          a2 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 3 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-x86.c b/src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index c0989c3..0000000
--- a/src/f32-igemm/gen/3x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 3);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (3 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      a += 3;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-        const v128_t va1 = wasm_v128_load(a1);
-        a1 += 4;
-        const v128_t va2 = wasm_v128_load(a2);
-        a2 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-          const v128_t va1 = wasm_v32x4_load_splat(a1);
-          a1 += 1;
-          const v128_t va2 = wasm_v32x4_load_splat(a2);
-          a2 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 3 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..7b8bb0f
--- /dev/null
+++ b/src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,170 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-splat.c b/src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..4c4e0c4
--- /dev/null
+++ b/src/f32-igemm/gen/3x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,236 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+        const v128_t va1 = wasm_v128_load(a1);
+        a1 += 4;
+        const v128_t va2 = wasm_v128_load(a2);
+        a2 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+          const v128_t va1 = wasm_v32x4_load_splat(a1);
+          a1 += 1;
+          const v128_t va2 = wasm_v32x4_load_splat(a2);
+          a2 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/3x8s4-minmax-sse.c b/src/f32-igemm/gen/3x8s4-minmax-sse.c
new file mode 100644
index 0000000..ab5fc84
--- /dev/null
+++ b/src/f32-igemm/gen/3x8s4-minmax-sse.c
@@ -0,0 +1,237 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_3x8s4__sse(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        __m128 va0 = _mm_loadu_ps(a0);
+        a0 += 4;
+        __m128 va1 = _mm_loadu_ps(a1);
+        a1 += 4;
+        __m128 va2 = _mm_loadu_ps(a2);
+        a2 += 4;
+
+
+        const __m128 vb0123c0 = _mm_load_ps(w + 0);
+        const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
+
+        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+        const __m128 vb0123c1 = _mm_load_ps(w + 8);
+        const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
+
+        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+        const __m128 vb0123c2 = _mm_load_ps(w + 16);
+        const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
+
+        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+
+        const __m128 vb0123c3 = _mm_load_ps(w + 24);
+        const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const __m128 vb0123 = _mm_load_ps(w);
+          const __m128 vb4567 = _mm_load_ps(w + 4);
+          w += 8;
+
+          const __m128 va0 = _mm_load1_ps(a0);
+          a0 += 1;
+          const __m128 va1 = _mm_load1_ps(a1);
+          a1 += 1;
+          const __m128 va2 = _mm_load1_ps(a2);
+          a2 += 1;
+
+          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/4x8-minmax-sse2-dup.c b/src/f32-igemm/gen/4x8-minmax-sse2-dup.c
new file mode 100644
index 0000000..edf93d2
--- /dev/null
+++ b/src/f32-igemm/gen/4x8-minmax-sse2-dup.c
@@ -0,0 +1,283 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const __m128 va0 = _mm_loadu_ps(a0);
+        a0 += 4;
+        const __m128 va1 = _mm_loadu_ps(a1);
+        a1 += 4;
+        const __m128 va2 = _mm_loadu_ps(a2);
+        a2 += 4;
+        const __m128 va3 = _mm_loadu_ps(a3);
+        a3 += 4;
+
+
+        const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
+
+        const __m128 vb0123c0 = _mm_load_ps(w + 0);
+        const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+
+        const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
+
+        const __m128 vb0123c1 = _mm_load_ps(w + 8);
+        const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+
+        const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
+
+        const __m128 vb0123c2 = _mm_load_ps(w + 16);
+        const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+
+        const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+
+        const __m128 vb0123c3 = _mm_load_ps(w + 24);
+        const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const __m128 vb0123 = _mm_load_ps(w);
+          const __m128 vb4567 = _mm_load_ps(w + 4);
+          w += 8;
+
+          const __m128 va0 = _mm_load1_ps(a0);
+          a0 += 1;
+          const __m128 va1 = _mm_load1_ps(a1);
+          a1 += 1;
+          const __m128 va2 = _mm_load1_ps(a2);
+          a2 += 1;
+          const __m128 va3 = _mm_load1_ps(a3);
+          a3 += 1;
+
+          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+          vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+          vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..a47a41b
--- /dev/null
+++ b/src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,199 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-splat.c b/src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..5d8dc75
--- /dev/null
+++ b/src/f32-igemm/gen/4x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,279 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+        const v128_t va1 = wasm_v128_load(a1);
+        a1 += 4;
+        const v128_t va2 = wasm_v128_load(a2);
+        a2 += 4;
+        const v128_t va3 = wasm_v128_load(a3);
+        a3 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+          const v128_t va1 = wasm_v32x4_load_splat(a1);
+          a1 += 1;
+          const v128_t va2 = wasm_v32x4_load_splat(a2);
+          a2 += 1;
+          const v128_t va3 = wasm_v32x4_load_splat(a3);
+          a3 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index a57c142..0000000
--- a/src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,199 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (4 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    c3 = c2;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      a += 4;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 4 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 66e755e..0000000
--- a/src/f32-igemm/gen/4x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,199 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (4 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    c3 = c2;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      a += 4;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 4 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
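
The deleted loadsplat-arm and loadsplat-x86 kernels above share the same inner loop and differ only in how the accumulators are clamped to the [min, max] range: the arm variant hoists the vmin/vmax loads above the main loop and clamps with native SIMD min/max, while the x86 variant loads them just before clamping and uses compare + bitselect, which typically lowers to cheaper compare/blend sequences on x86 hosts. A minimal sketch of the two idioms, built from the same wasm_simd128.h intrinsics the kernels use (the helper names are illustrative, not XNNPACK symbols):

#include <wasm_simd128.h>

/* ARM-tuned variant: clamp with native SIMD min/max. */
static inline v128_t clamp_minmax_arm(v128_t vacc, v128_t vmin, v128_t vmax) {
  vacc = wasm_f32x4_max(vacc, vmin);
  return wasm_f32x4_min(vacc, vmax);
}

/* x86-tuned variant: clamp by selecting between the accumulator and the
   bound, based on an explicit lane-wise comparison. */
static inline v128_t clamp_minmax_x86(v128_t vacc, v128_t vmin, v128_t vmax) {
  vacc = wasm_v128_bitselect(vmin, vacc, wasm_f32x4_lt(vacc, vmin));
  return wasm_v128_bitselect(vacc, vmax, wasm_f32x4_le(vacc, vmax));
}
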
diff --git a/src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-arm.c b/src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 3a910b4..0000000
--- a/src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,279 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (4 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    c3 = c2;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      a += 4;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-        const v128_t va1 = wasm_v128_load(a1);
-        a1 += 4;
-        const v128_t va2 = wasm_v128_load(a2);
-        a2 += 4;
-        const v128_t va3 = wasm_v128_load(a3);
-        a3 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-          const v128_t va1 = wasm_v32x4_load_splat(a1);
-          a1 += 1;
-          const v128_t va2 = wasm_v32x4_load_splat(a2);
-          a2 += 1;
-          const v128_t va3 = wasm_v32x4_load_splat(a3);
-          a3 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 4 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-x86.c b/src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 50825d3..0000000
--- a/src/f32-igemm/gen/4x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,279 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (4 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    c3 = c2;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      a += 4;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-        const v128_t va1 = wasm_v128_load(a1);
-        a1 += 4;
-        const v128_t va2 = wasm_v128_load(a2);
-        a2 += 4;
-        const v128_t va3 = wasm_v128_load(a3);
-        a3 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-          const v128_t va1 = wasm_v32x4_load_splat(a1);
-          a1 += 1;
-          const v128_t va2 = wasm_v32x4_load_splat(a2);
-          a2 += 1;
-          const v128_t va3 = wasm_v32x4_load_splat(a3);
-          a3 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 4 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..9fa694b
--- /dev/null
+++ b/src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,199 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
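
The new 4x8-minmax-wasmsimd-x86-loadsplat kernel above and the x86-splat kernel that follows differ in how they broadcast A-matrix elements: loadsplat issues one scalar load-and-splat per k step, while splat loads four k values at once and broadcasts each lane with a shuffle, falling back to load-splat for the k remainder. A minimal sketch of the two broadcast patterns (helper names are illustrative, not XNNPACK symbols):

#include <wasm_simd128.h>

/* loadsplat template: one scalar load per k step, replicated to all lanes. */
static inline v128_t broadcast_loadsplat(const float* a) {
  return wasm_v32x4_load_splat(a);
}

/* splat template: one vector load covers four k steps; each element is then
   broadcast with a shuffle (lane 0 shown; lanes 1..3 use indices 1..3). */
static inline v128_t broadcast_splat_lane0(const float* a) {
  const v128_t va = wasm_v128_load(a);
  return wasm_v32x4_shuffle(va, va, 0, 0, 0, 0);
}
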
diff --git a/src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-splat.c b/src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..576a72d
--- /dev/null
+++ b/src/f32-igemm/gen/4x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,279 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+        const v128_t va1 = wasm_v128_load(a1);
+        a1 += 4;
+        const v128_t va2 = wasm_v128_load(a2);
+        a2 += 4;
+        const v128_t va3 = wasm_v128_load(a3);
+        a3 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+          const v128_t va1 = wasm_v32x4_load_splat(a1);
+          a1 += 1;
+          const v128_t va2 = wasm_v32x4_load_splat(a2);
+          a2 += 1;
+          const v128_t va3 = wasm_v32x4_load_splat(a3);
+          a3 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
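
The added wasmsimd-x86-loadsplat and wasmsimd-x86-splat files above carry the same kernel bodies as the deleted loadsplat-x86 and splat-x86 files; only the file and function names change, moving the ISA tag ahead of the template name. Both kernels end with the same nc & 4 / nc & 2 / nc & 1 tail, which writes the remaining 1..7 columns of an output row from the low lanes of the accumulators. A condensed sketch of that tail for a single row (the helper name is illustrative, not an XNNPACK symbol):

#include <wasm_simd128.h>

/* Store the trailing nc (< 8) columns of one output row from the two
   accumulator halves vlo = columns 0..3 and vhi = columns 4..7. */
static void store_row_tail(float* c, v128_t vlo, v128_t vhi, size_t nc) {
  if (nc & 4) {
    wasm_v128_store(c, vlo);                           /* columns 0..3 */
    vlo = vhi;                                         /* shift high half down */
    c += 4;
  }
  if (nc & 2) {
    *((double*) c) = wasm_f64x2_extract_lane(vlo, 0);  /* two more columns */
    vlo = wasm_v32x4_shuffle(vlo, vlo, 2, 3, 2, 3);    /* shift by two lanes */
    c += 2;
  }
  if (nc & 1) {
    *c = wasm_f32x4_extract_lane(vlo, 0);              /* last column */
  }
}
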
diff --git a/src/f32-igemm/gen/5x8-minmax-sse-dup.c b/src/f32-igemm/gen/5x8-minmax-sse-dup.c
new file mode 100644
index 0000000..d118d48
--- /dev/null
+++ b/src/f32-igemm/gen/5x8-minmax-sse-dup.c
@@ -0,0 +1,326 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_5x8__sse_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (5 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    __m128 vacc4x0123 = vacc0x0123;
+    __m128 vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      a += 5;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const __m128 va0 = _mm_loadu_ps(a0);
+        a0 += 4;
+        const __m128 va1 = _mm_loadu_ps(a1);
+        a1 += 4;
+        const __m128 va2 = _mm_loadu_ps(a2);
+        a2 += 4;
+        const __m128 va3 = _mm_loadu_ps(a3);
+        a3 += 4;
+        const __m128 va4 = _mm_loadu_ps(a4);
+        a4 += 4;
+
+
+        const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
+        const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
+        const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
+        const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
+        const __m128 va4c0000 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 0, 0, 0));
+
+        const __m128 vb0123c0 = _mm_load_ps(w + 0);
+        const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
+
+        const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
+        const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
+        const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
+        const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
+        const __m128 va4c1111 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(1, 1, 1, 1));
+
+        const __m128 vb0123c1 = _mm_load_ps(w + 8);
+        const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
+
+        const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
+        const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
+        const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
+        const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
+        const __m128 va4c2222 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(2, 2, 2, 2));
+
+        const __m128 vb0123c2 = _mm_load_ps(w + 16);
+        const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
+
+        const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
+
+        const __m128 vb0123c3 = _mm_load_ps(w + 24);
+        const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const __m128 vb0123 = _mm_load_ps(w);
+          const __m128 vb4567 = _mm_load_ps(w + 4);
+          w += 8;
+
+          const __m128 va0 = _mm_load1_ps(a0);
+          a0 += 1;
+          const __m128 va1 = _mm_load1_ps(a1);
+          a1 += 1;
+          const __m128 va2 = _mm_load1_ps(a2);
+          a2 += 1;
+          const __m128 va3 = _mm_load1_ps(a3);
+          a3 += 1;
+          const __m128 va4 = _mm_load1_ps(a4);
+          a4 += 1;
+
+          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+          vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+          vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+          vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+          vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 5 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x8-minmax-sse-load1.c b/src/f32-igemm/gen/5x8-minmax-sse-load1.c
new file mode 100644
index 0000000..269a9e5
--- /dev/null
+++ b/src/f32-igemm/gen/5x8-minmax-sse-load1.c
@@ -0,0 +1,228 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-load1.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_5x8__sse_load1(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (5 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    __m128 vacc4x0123 = vacc0x0123;
+    __m128 vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      a += 5;
+
+      size_t k = kc;
+      do {
+        const __m128 vb0123 = _mm_load_ps(w);
+        const __m128 vb4567 = _mm_load_ps(w + 4);
+        w += 8;
+
+        const __m128 va0 = _mm_load1_ps(a0);
+        a0 += 1;
+        const __m128 va1 = _mm_load1_ps(a1);
+        a1 += 1;
+        const __m128 va2 = _mm_load1_ps(a2);
+        a2 += 1;
+        const __m128 va3 = _mm_load1_ps(a3);
+        a3 += 1;
+        const __m128 va4 = _mm_load1_ps(a4);
+        a4 += 1;
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 5 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x8-minmax-sse2-dup.c b/src/f32-igemm/gen/5x8-minmax-sse2-dup.c
new file mode 100644
index 0000000..e4f93c6
--- /dev/null
+++ b/src/f32-igemm/gen/5x8-minmax-sse2-dup.c
@@ -0,0 +1,326 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (5 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    __m128 vacc4x0123 = vacc0x0123;
+    __m128 vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      a += 5;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const __m128 va0 = _mm_loadu_ps(a0);
+        a0 += 4;
+        const __m128 va1 = _mm_loadu_ps(a1);
+        a1 += 4;
+        const __m128 va2 = _mm_loadu_ps(a2);
+        a2 += 4;
+        const __m128 va3 = _mm_loadu_ps(a3);
+        a3 += 4;
+        const __m128 va4 = _mm_loadu_ps(a4);
+        a4 += 4;
+
+
+        const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
+        const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));
+
+        const __m128 vb0123c0 = _mm_load_ps(w + 0);
+        const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
+
+        const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
+        const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));
+
+        const __m128 vb0123c1 = _mm_load_ps(w + 8);
+        const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
+
+        const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
+        const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));
+
+        const __m128 vb0123c2 = _mm_load_ps(w + 16);
+        const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
+
+        const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
+        const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
+
+        const __m128 vb0123c3 = _mm_load_ps(w + 24);
+        const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const __m128 vb0123 = _mm_load_ps(w);
+          const __m128 vb4567 = _mm_load_ps(w + 4);
+          w += 8;
+
+          const __m128 va0 = _mm_load1_ps(a0);
+          a0 += 1;
+          const __m128 va1 = _mm_load1_ps(a1);
+          a1 += 1;
+          const __m128 va2 = _mm_load1_ps(a2);
+          a2 += 1;
+          const __m128 va3 = _mm_load1_ps(a3);
+          a3 += 1;
+          const __m128 va4 = _mm_load1_ps(a4);
+          a4 += 1;
+
+          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+          vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+          vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+          vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+          vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 5 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..baf20ec
--- /dev/null
+++ b/src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,228 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (5 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      a += 5;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 5 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-splat.c b/src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..1b1aa85
--- /dev/null
+++ b/src/f32-igemm/gen/5x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,322 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (5 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      a += 5;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+        const v128_t va1 = wasm_v128_load(a1);
+        a1 += 4;
+        const v128_t va2 = wasm_v128_load(a2);
+        a2 += 4;
+        const v128_t va3 = wasm_v128_load(a3);
+        a3 += 4;
+        const v128_t va4 = wasm_v128_load(a4);
+        a4 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+        const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+        const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+        const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+        const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+          const v128_t va1 = wasm_v32x4_load_splat(a1);
+          a1 += 1;
+          const v128_t va2 = wasm_v32x4_load_splat(a2);
+          a2 += 1;
+          const v128_t va3 = wasm_v32x4_load_splat(a3);
+          a3 += 1;
+          const v128_t va4 = wasm_v32x4_load_splat(a4);
+          a4 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+          vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+          vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 5 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index 3e36656..0000000
--- a/src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,228 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (5 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    c3 = c2;
-  }
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    c4 = c3;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      const float* restrict a4 = a[4];
-      assert(a4 != NULL);
-      if XNN_UNPREDICTABLE(a4 != zero) {
-        a4 = (const float*) ((uintptr_t) a4 + a_offset);
-      }
-      a += 5;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 5 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 7ee07b7..0000000
--- a/src/f32-igemm/gen/5x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,228 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (5 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    c3 = c2;
-  }
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    c4 = c3;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      const float* restrict a4 = a[4];
-      assert(a4 != NULL);
-      if XNN_UNPREDICTABLE(a4 != zero) {
-        a4 = (const float*) ((uintptr_t) a4 + a_offset);
-      }
-      a += 5;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 5 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-arm.c b/src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index b804ef4..0000000
--- a/src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,322 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (5 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    c3 = c2;
-  }
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    c4 = c3;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      const float* restrict a4 = a[4];
-      assert(a4 != NULL);
-      if XNN_UNPREDICTABLE(a4 != zero) {
-        a4 = (const float*) ((uintptr_t) a4 + a_offset);
-      }
-      a += 5;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-        const v128_t va1 = wasm_v128_load(a1);
-        a1 += 4;
-        const v128_t va2 = wasm_v128_load(a2);
-        a2 += 4;
-        const v128_t va3 = wasm_v128_load(a3);
-        a3 += 4;
-        const v128_t va4 = wasm_v128_load(a4);
-        a4 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-        const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-        const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-        const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-        const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-          const v128_t va1 = wasm_v32x4_load_splat(a1);
-          a1 += 1;
-          const v128_t va2 = wasm_v32x4_load_splat(a2);
-          a2 += 1;
-          const v128_t va3 = wasm_v32x4_load_splat(a3);
-          a3 += 1;
-          const v128_t va4 = wasm_v32x4_load_splat(a4);
-          a4 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-          vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-          vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 5 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-x86.c b/src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 8938b2f..0000000
--- a/src/f32-igemm/gen/5x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,322 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 5);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (5 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    c3 = c2;
-  }
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    c4 = c3;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      const float* restrict a4 = a[4];
-      assert(a4 != NULL);
-      if XNN_UNPREDICTABLE(a4 != zero) {
-        a4 = (const float*) ((uintptr_t) a4 + a_offset);
-      }
-      a += 5;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-        const v128_t va1 = wasm_v128_load(a1);
-        a1 += 4;
-        const v128_t va2 = wasm_v128_load(a2);
-        a2 += 4;
-        const v128_t va3 = wasm_v128_load(a3);
-        a3 += 4;
-        const v128_t va4 = wasm_v128_load(a4);
-        a4 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-        const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-        const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-        const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-        const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-          const v128_t va1 = wasm_v32x4_load_splat(a1);
-          a1 += 1;
-          const v128_t va2 = wasm_v32x4_load_splat(a2);
-          a2 += 1;
-          const v128_t va3 = wasm_v32x4_load_splat(a3);
-          a3 += 1;
-          const v128_t va4 = wasm_v32x4_load_splat(a4);
-          a4 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-          vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-          vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 5 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..4669f59
--- /dev/null
+++ b/src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,228 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (5 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      a += 5;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 5 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-splat.c b/src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..2c252f8
--- /dev/null
+++ b/src/f32-igemm/gen/5x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,322 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (5 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      a += 5;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+        const v128_t va1 = wasm_v128_load(a1);
+        a1 += 4;
+        const v128_t va2 = wasm_v128_load(a2);
+        a2 += 4;
+        const v128_t va3 = wasm_v128_load(a3);
+        a3 += 4;
+        const v128_t va4 = wasm_v128_load(a4);
+        a4 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+        const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+        const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+        const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+        const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+          const v128_t va1 = wasm_v32x4_load_splat(a1);
+          a1 += 1;
+          const v128_t va2 = wasm_v32x4_load_splat(a2);
+          a2 += 1;
+          const v128_t va3 = wasm_v32x4_load_splat(a3);
+          a3 += 1;
+          const v128_t va4 = wasm_v32x4_load_splat(a4);
+          a4 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+          vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+          vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 5 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x8s4-minmax-sse.c b/src/f32-igemm/gen/5x8s4-minmax-sse.c
new file mode 100644
index 0000000..bd3c29e
--- /dev/null
+++ b/src/f32-igemm/gen/5x8s4-minmax-sse.c
@@ -0,0 +1,321 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/sse-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <xmmintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_5x8s4__sse(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 5);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (5 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+
+  do {
+    __m128 vacc0x0123 = _mm_load_ps(w);
+    __m128 vacc0x4567 = _mm_load_ps(w + 4);
+    __m128 vacc1x0123 = vacc0x0123;
+    __m128 vacc1x4567 = vacc0x4567;
+    __m128 vacc2x0123 = vacc0x0123;
+    __m128 vacc2x4567 = vacc0x4567;
+    __m128 vacc3x0123 = vacc0x0123;
+    __m128 vacc3x4567 = vacc0x4567;
+    __m128 vacc4x0123 = vacc0x0123;
+    __m128 vacc4x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      a += 5;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        __m128 va0 = _mm_loadu_ps(a0);
+        a0 += 4;
+        __m128 va1 = _mm_loadu_ps(a1);
+        a1 += 4;
+        __m128 va2 = _mm_loadu_ps(a2);
+        a2 += 4;
+        __m128 va3 = _mm_loadu_ps(a3);
+        a3 += 4;
+        __m128 va4 = _mm_loadu_ps(a4);
+        a4 += 4;
+
+
+        const __m128 vb0123c0 = _mm_load_ps(w + 0);
+        const __m128 vb4567c0 = _mm_load_ps(w + 4);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));
+
+        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+        const __m128 vb0123c1 = _mm_load_ps(w + 8);
+        const __m128 vb4567c1 = _mm_load_ps(w + 12);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));
+
+        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+        const __m128 vb0123c2 = _mm_load_ps(w + 16);
+        const __m128 vb4567c2 = _mm_load_ps(w + 20);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));
+
+        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
+        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
+        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
+        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
+        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
+
+        const __m128 vb0123c3 = _mm_load_ps(w + 24);
+        const __m128 vb4567c3 = _mm_load_ps(w + 28);
+
+        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
+        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
+        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
+        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
+        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
+        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
+        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
+        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
+        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
+        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));
+
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const __m128 vb0123 = _mm_load_ps(w);
+          const __m128 vb4567 = _mm_load_ps(w + 4);
+          w += 8;
+
+          const __m128 va0 = _mm_load1_ps(a0);
+          a0 += 1;
+          const __m128 va1 = _mm_load1_ps(a1);
+          a1 += 1;
+          const __m128 va2 = _mm_load1_ps(a2);
+          a2 += 1;
+          const __m128 va3 = _mm_load1_ps(a3);
+          a3 += 1;
+          const __m128 va4 = _mm_load1_ps(a4);
+          a4 += 1;
+
+          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
+          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
+          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
+          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
+          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
+          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
+          vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
+          vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
+          vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
+          vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 5 * sizeof(void*);
+    } while (p != 0);
+
+    const __m128 vmax = _mm_load_ps(params->sse.max);
+    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
+    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
+    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
+    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
+    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
+    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
+    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
+    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
+    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
+    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
+
+    const __m128 vmin = _mm_load_ps(params->sse.min);
+    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
+    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
+    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
+    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
+    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
+    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
+    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
+    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
+    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
+    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
+
+    if XNN_LIKELY(nc >= 8) {
+      _mm_storeu_ps(c4, vacc4x0123);
+      _mm_storeu_ps(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      _mm_storeu_ps(c3, vacc3x0123);
+      _mm_storeu_ps(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      _mm_storeu_ps(c2, vacc2x0123);
+      _mm_storeu_ps(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      _mm_storeu_ps(c1, vacc1x0123);
+      _mm_storeu_ps(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      _mm_storeu_ps(c0, vacc0x0123);
+      _mm_storeu_ps(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        _mm_storeu_ps(c4, vacc4x0123);
+        _mm_storeu_ps(c3, vacc3x0123);
+        _mm_storeu_ps(c2, vacc2x0123);
+        _mm_storeu_ps(c1, vacc1x0123);
+        _mm_storeu_ps(c0, vacc0x0123);
+
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        _mm_storel_pi((__m64*) c4, vacc4x0123);
+        _mm_storel_pi((__m64*) c3, vacc3x0123);
+        _mm_storel_pi((__m64*) c2, vacc2x0123);
+        _mm_storel_pi((__m64*) c1, vacc1x0123);
+        _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        _mm_store_ss(c4, vacc4x0123);
+        _mm_store_ss(c3, vacc3x0123);
+        _mm_store_ss(c2, vacc2x0123);
+        _mm_store_ss(c1, vacc1x0123);
+        _mm_store_ss(c0, vacc0x0123);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c b/src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c
new file mode 100644
index 0000000..de3900f
--- /dev/null
+++ b/src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-loadsplat.c
@@ -0,0 +1,257 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (6 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    c5 = c4;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    v128_t vacc5x0123 = vacc0x0123;
+    v128_t vacc5x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      const float* restrict a5 = a[5];
+      assert(a5 != NULL);
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const float*) ((uintptr_t) a5 + a_offset);
+      }
+      a += 6;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+        const v128_t va5 = wasm_v32x4_load_splat(a5);
+        a5 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 6 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-splat.c b/src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-splat.c
new file mode 100644
index 0000000..d080a9c
--- /dev/null
+++ b/src/f32-igemm/gen/6x8-minmax-wasmsimd-arm-splat.c
@@ -0,0 +1,365 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (6 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    c5 = c4;
+  }
+
+  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    v128_t vacc5x0123 = vacc0x0123;
+    v128_t vacc5x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      const float* restrict a5 = a[5];
+      assert(a5 != NULL);
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const float*) ((uintptr_t) a5 + a_offset);
+      }
+      a += 6;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+        const v128_t va1 = wasm_v128_load(a1);
+        a1 += 4;
+        const v128_t va2 = wasm_v128_load(a2);
+        a2 += 4;
+        const v128_t va3 = wasm_v128_load(a3);
+        a3 += 4;
+        const v128_t va4 = wasm_v128_load(a4);
+        a4 += 4;
+        const v128_t va5 = wasm_v128_load(a5);
+        a5 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+        const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+        const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+        const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+        const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+        const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+        const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+        const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+        const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+          const v128_t va1 = wasm_v32x4_load_splat(a1);
+          a1 += 1;
+          const v128_t va2 = wasm_v32x4_load_splat(a2);
+          a2 += 1;
+          const v128_t va3 = wasm_v32x4_load_splat(a3);
+          a3 += 1;
+          const v128_t va4 = wasm_v32x4_load_splat(a4);
+          a4 += 1;
+          const v128_t va5 = wasm_v32x4_load_splat(a5);
+          a5 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+          vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+          vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+          vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+          vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 6 * sizeof(void*);
+    } while (p != 0);
+
+    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
+    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
+    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
+    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
+    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
+    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
+    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
+    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
+    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
+    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
+    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
+    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
+
+    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
+    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
+    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
+    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
+    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
+    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
+    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
+    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
+    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
+    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
+    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
+    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c b/src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c
deleted file mode 100644
index 817bce3..0000000
--- a/src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-arm.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (6 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    c3 = c2;
-  }
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    c4 = c3;
-  }
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    c5 = c4;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    v128_t vacc5x0123 = vacc0x0123;
-    v128_t vacc5x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      const float* restrict a4 = a[4];
-      assert(a4 != NULL);
-      if XNN_UNPREDICTABLE(a4 != zero) {
-        a4 = (const float*) ((uintptr_t) a4 + a_offset);
-      }
-      const float* restrict a5 = a[5];
-      assert(a5 != NULL);
-      if XNN_UNPREDICTABLE(a5 != zero) {
-        a5 = (const float*) ((uintptr_t) a5 + a_offset);
-      }
-      a += 6;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-        const v128_t va5 = wasm_v32x4_load_splat(a5);
-        a5 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 6 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c b/src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c
deleted file mode 100644
index 4590d6c..0000000
--- a/src/f32-igemm/gen/6x8-minmax-wasmsimd-loadsplat-x86.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (6 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    c3 = c2;
-  }
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    c4 = c3;
-  }
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    c5 = c4;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    v128_t vacc5x0123 = vacc0x0123;
-    v128_t vacc5x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      const float* restrict a4 = a[4];
-      assert(a4 != NULL);
-      if XNN_UNPREDICTABLE(a4 != zero) {
-        a4 = (const float*) ((uintptr_t) a4 + a_offset);
-      }
-      const float* restrict a5 = a[5];
-      assert(a5 != NULL);
-      if XNN_UNPREDICTABLE(a5 != zero) {
-        a5 = (const float*) ((uintptr_t) a5 + a_offset);
-      }
-      a += 6;
-
-      size_t k = kc;
-      do {
-        const v128_t vb0123 = wasm_v128_load(w);
-        const v128_t vb4567 = wasm_v128_load(w + 4);
-        w += 8;
-
-        const v128_t va0 = wasm_v32x4_load_splat(a0);
-        a0 += 1;
-        const v128_t va1 = wasm_v32x4_load_splat(a1);
-        a1 += 1;
-        const v128_t va2 = wasm_v32x4_load_splat(a2);
-        a2 += 1;
-        const v128_t va3 = wasm_v32x4_load_splat(a3);
-        a3 += 1;
-        const v128_t va4 = wasm_v32x4_load_splat(a4);
-        a4 += 1;
-        const v128_t va5 = wasm_v32x4_load_splat(a5);
-        a5 += 1;
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-        k -= sizeof(float);
-      } while (k != 0);
-      p -= 6 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-arm.c b/src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 47569f2..0000000
--- a/src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,365 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (6 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    c3 = c2;
-  }
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    c4 = c3;
-  }
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    c5 = c4;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    v128_t vacc5x0123 = vacc0x0123;
-    v128_t vacc5x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      const float* restrict a4 = a[4];
-      assert(a4 != NULL);
-      if XNN_UNPREDICTABLE(a4 != zero) {
-        a4 = (const float*) ((uintptr_t) a4 + a_offset);
-      }
-      const float* restrict a5 = a[5];
-      assert(a5 != NULL);
-      if XNN_UNPREDICTABLE(a5 != zero) {
-        a5 = (const float*) ((uintptr_t) a5 + a_offset);
-      }
-      a += 6;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-        const v128_t va1 = wasm_v128_load(a1);
-        a1 += 4;
-        const v128_t va2 = wasm_v128_load(a2);
-        a2 += 4;
-        const v128_t va3 = wasm_v128_load(a3);
-        a3 += 4;
-        const v128_t va4 = wasm_v128_load(a4);
-        a4 += 4;
-        const v128_t va5 = wasm_v128_load(a5);
-        a5 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-        const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-        const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-        const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-        const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-        const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-        const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-        const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-        const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-          const v128_t va1 = wasm_v32x4_load_splat(a1);
-          a1 += 1;
-          const v128_t va2 = wasm_v32x4_load_splat(a2);
-          a2 += 1;
-          const v128_t va3 = wasm_v32x4_load_splat(a3);
-          a3 += 1;
-          const v128_t va4 = wasm_v32x4_load_splat(a4);
-          a4 += 1;
-          const v128_t va5 = wasm_v32x4_load_splat(a5);
-          a5 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-          vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-          vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-          vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-          vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 6 * sizeof(void*);
-    } while (p != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc4x0123 = wasm_f32x4_max(vacc4x0123, vmin);
-    vacc5x0123 = wasm_f32x4_max(vacc5x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-    vacc4x4567 = wasm_f32x4_max(vacc4x4567, vmin);
-    vacc5x4567 = wasm_f32x4_max(vacc5x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc4x0123 = wasm_f32x4_min(vacc4x0123, vmax);
-    vacc5x0123 = wasm_f32x4_min(vacc5x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-    vacc4x4567 = wasm_f32x4_min(vacc4x4567, vmax);
-    vacc5x4567 = wasm_f32x4_min(vacc5x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-x86.c b/src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 825fd24..0000000
--- a/src/f32-igemm/gen/6x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,365 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-igemm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/igemm.h>
-
-
-void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86(
-    size_t mr,
-    size_t nc,
-    size_t kc,
-    size_t ks,
-    const float**restrict a,
-    const float*restrict w,
-    float*restrict c,
-    size_t cm_stride,
-    size_t cn_stride,
-    size_t a_offset,
-    const float* zero,
-    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 6);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-  assert(ks != 0);
-  assert(ks % (6 * sizeof(void*)) == 0);
-  assert(a_offset % sizeof(float) == 0);
-  assert(a != NULL);
-  assert(w != NULL);
-  assert(c != NULL);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 4) {
-    c3 = c2;
-  }
-  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 4) {
-    c4 = c3;
-  }
-  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 6) {
-    c5 = c4;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    v128_t vacc4x0123 = vacc0x0123;
-    v128_t vacc4x4567 = vacc0x4567;
-    v128_t vacc5x0123 = vacc0x0123;
-    v128_t vacc5x4567 = vacc0x4567;
-    w += 8;
-
-    size_t p = ks;
-    do {
-      const float* restrict a0 = a[0];
-      assert(a0 != NULL);
-      if XNN_UNPREDICTABLE(a0 != zero) {
-        a0 = (const float*) ((uintptr_t) a0 + a_offset);
-      }
-      const float* restrict a1 = a[1];
-      assert(a1 != NULL);
-      if XNN_UNPREDICTABLE(a1 != zero) {
-        a1 = (const float*) ((uintptr_t) a1 + a_offset);
-      }
-      const float* restrict a2 = a[2];
-      assert(a2 != NULL);
-      if XNN_UNPREDICTABLE(a2 != zero) {
-        a2 = (const float*) ((uintptr_t) a2 + a_offset);
-      }
-      const float* restrict a3 = a[3];
-      assert(a3 != NULL);
-      if XNN_UNPREDICTABLE(a3 != zero) {
-        a3 = (const float*) ((uintptr_t) a3 + a_offset);
-      }
-      const float* restrict a4 = a[4];
-      assert(a4 != NULL);
-      if XNN_UNPREDICTABLE(a4 != zero) {
-        a4 = (const float*) ((uintptr_t) a4 + a_offset);
-      }
-      const float* restrict a5 = a[5];
-      assert(a5 != NULL);
-      if XNN_UNPREDICTABLE(a5 != zero) {
-        a5 = (const float*) ((uintptr_t) a5 + a_offset);
-      }
-      a += 6;
-
-      size_t k = kc;
-      while (k >= 4 * sizeof(float)) {
-        const v128_t va0 = wasm_v128_load(a0);
-        a0 += 4;
-        const v128_t va1 = wasm_v128_load(a1);
-        a1 += 4;
-        const v128_t va2 = wasm_v128_load(a2);
-        a2 += 4;
-        const v128_t va3 = wasm_v128_load(a3);
-        a3 += 4;
-        const v128_t va4 = wasm_v128_load(a4);
-        a4 += 4;
-        const v128_t va5 = wasm_v128_load(a5);
-        a5 += 4;
-
-        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
-        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
-        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
-        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
-        const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
-        const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
-
-        const v128_t vb0123c0 = wasm_v128_load(w + 0);
-        const v128_t vb4567c0 = wasm_v128_load(w + 4);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
-        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
-        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
-        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
-        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
-        const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
-        const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
-
-        const v128_t vb0123c1 = wasm_v128_load(w + 8);
-        const v128_t vb4567c1 = wasm_v128_load(w + 12);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
-        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
-        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
-        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
-        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
-        const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
-        const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
-
-        const v128_t vb0123c2 = wasm_v128_load(w + 16);
-        const v128_t vb4567c2 = wasm_v128_load(w + 20);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
-        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
-        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
-        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
-        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
-        const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
-        const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
-
-        const v128_t vb0123c3 = wasm_v128_load(w + 24);
-        const v128_t vb4567c3 = wasm_v128_load(w + 28);
-
-        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
-        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
-        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
-        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
-        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
-        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
-        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
-        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
-        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
-        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
-        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
-        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
-
-        w += 32;
-        k -= 4 * sizeof(float);
-      }
-      if XNN_UNLIKELY(k != 0) {
-        do {
-          const v128_t vb0123 = wasm_v128_load(w);
-          const v128_t vb4567 = wasm_v128_load(w + 4);
-          w += 8;
-
-          const v128_t va0 = wasm_v32x4_load_splat(a0);
-          a0 += 1;
-          const v128_t va1 = wasm_v32x4_load_splat(a1);
-          a1 += 1;
-          const v128_t va2 = wasm_v32x4_load_splat(a2);
-          a2 += 1;
-          const v128_t va3 = wasm_v32x4_load_splat(a3);
-          a3 += 1;
-          const v128_t va4 = wasm_v32x4_load_splat(a4);
-          a4 += 1;
-          const v128_t va5 = wasm_v32x4_load_splat(a5);
-          a5 += 1;
-
-          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
-          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
-          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
-          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
-          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
-          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
-          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
-          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
-          vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
-          vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
-          vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
-          vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
-          k -= sizeof(float);
-        } while (k != 0);
-      }
-      p -= 6 * sizeof(void*);
-    } while (p != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
-    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
-    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
-    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
-    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c5, vacc5x0123);
-      wasm_v128_store(c5 + 4, vacc5x4567);
-      c5 = (float*) ((uintptr_t) c5 + cn_stride);
-      wasm_v128_store(c4, vacc4x0123);
-      wasm_v128_store(c4 + 4, vacc4x4567);
-      c4 = (float*) ((uintptr_t) c4 + cn_stride);
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      a = (const float**restrict) ((uintptr_t) a - ks);
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c5, vacc5x0123);
-        wasm_v128_store(c4, vacc4x0123);
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc5x0123 = vacc5x4567;
-        vacc4x0123 = vacc4x4567;
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c5 += 4;
-        c4 += 4;
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
-        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
-        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c5 += 2;
-        c4 += 2;
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
-        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c b/src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c
new file mode 100644
index 0000000..a9feb09
--- /dev/null
+++ b/src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-loadsplat.c
@@ -0,0 +1,257 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-loadsplat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (6 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    c5 = c4;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    v128_t vacc5x0123 = vacc0x0123;
+    v128_t vacc5x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      const float* restrict a5 = a[5];
+      assert(a5 != NULL);
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const float*) ((uintptr_t) a5 + a_offset);
+      }
+      a += 6;
+
+      size_t k = kc;
+      do {
+        const v128_t vb0123 = wasm_v128_load(w);
+        const v128_t vb4567 = wasm_v128_load(w + 4);
+        w += 8;
+
+        const v128_t va0 = wasm_v32x4_load_splat(a0);
+        a0 += 1;
+        const v128_t va1 = wasm_v32x4_load_splat(a1);
+        a1 += 1;
+        const v128_t va2 = wasm_v32x4_load_splat(a2);
+        a2 += 1;
+        const v128_t va3 = wasm_v32x4_load_splat(a3);
+        a3 += 1;
+        const v128_t va4 = wasm_v32x4_load_splat(a4);
+        a4 += 1;
+        const v128_t va5 = wasm_v32x4_load_splat(a5);
+        a5 += 1;
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+        k -= sizeof(float);
+      } while (k != 0);
+      p -= 6 * sizeof(void*);
+    } while (p != 0);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-splat.c b/src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-splat.c
new file mode 100644
index 0000000..dd0a239
--- /dev/null
+++ b/src/f32-igemm/gen/6x8-minmax-wasmsimd-x86-splat.c
@@ -0,0 +1,365 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-igemm/wasmsimd-splat.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const float**restrict a,
+    const float*restrict w,
+    float*restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const float* zero,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(float) == 0);
+  assert(ks != 0);
+  assert(ks % (6 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(float) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  float* c0 = c;
+  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    c5 = c4;
+  }
+
+  do {
+    v128_t vacc0x0123 = wasm_v128_load(w);
+    v128_t vacc0x4567 = wasm_v128_load(w + 4);
+    v128_t vacc1x0123 = vacc0x0123;
+    v128_t vacc1x4567 = vacc0x4567;
+    v128_t vacc2x0123 = vacc0x0123;
+    v128_t vacc2x4567 = vacc0x4567;
+    v128_t vacc3x0123 = vacc0x0123;
+    v128_t vacc3x4567 = vacc0x4567;
+    v128_t vacc4x0123 = vacc0x0123;
+    v128_t vacc4x4567 = vacc0x4567;
+    v128_t vacc5x0123 = vacc0x0123;
+    v128_t vacc5x4567 = vacc0x4567;
+    w += 8;
+
+    size_t p = ks;
+    do {
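+      // Indirection buffer: any of the 6 row pointers may alias the shared `zero` padding row,
+      // in which case a_offset is deliberately not applied to it (see the checks below).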
+      const float* restrict a0 = a[0];
+      assert(a0 != NULL);
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const float*) ((uintptr_t) a0 + a_offset);
+      }
+      const float* restrict a1 = a[1];
+      assert(a1 != NULL);
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const float*) ((uintptr_t) a1 + a_offset);
+      }
+      const float* restrict a2 = a[2];
+      assert(a2 != NULL);
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const float*) ((uintptr_t) a2 + a_offset);
+      }
+      const float* restrict a3 = a[3];
+      assert(a3 != NULL);
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const float*) ((uintptr_t) a3 + a_offset);
+      }
+      const float* restrict a4 = a[4];
+      assert(a4 != NULL);
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const float*) ((uintptr_t) a4 + a_offset);
+      }
+      const float* restrict a5 = a[5];
+      assert(a5 != NULL);
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const float*) ((uintptr_t) a5 + a_offset);
+      }
+      a += 6;
+
+      size_t k = kc;
+      while (k >= 4 * sizeof(float)) {
+        const v128_t va0 = wasm_v128_load(a0);
+        a0 += 4;
+        const v128_t va1 = wasm_v128_load(a1);
+        a1 += 4;
+        const v128_t va2 = wasm_v128_load(a2);
+        a2 += 4;
+        const v128_t va3 = wasm_v128_load(a3);
+        a3 += 4;
+        const v128_t va4 = wasm_v128_load(a4);
+        a4 += 4;
+        const v128_t va5 = wasm_v128_load(a5);
+        a5 += 4;
+
+        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
+        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
+        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
+        const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
+        const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
+        const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
+
+        const v128_t vb0123c0 = wasm_v128_load(w + 0);
+        const v128_t vb4567c0 = wasm_v128_load(w + 4);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
+        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
+        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
+        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
+        const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
+        const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
+        const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
+
+        const v128_t vb0123c1 = wasm_v128_load(w + 8);
+        const v128_t vb4567c1 = wasm_v128_load(w + 12);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c1, vb0123c1));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c1, vb0123c1));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c1, vb0123c1));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c1, vb4567c1));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c1, vb4567c1));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c1, vb4567c1));
+        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
+        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
+        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
+        const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
+        const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
+        const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
+
+        const v128_t vb0123c2 = wasm_v128_load(w + 16);
+        const v128_t vb4567c2 = wasm_v128_load(w + 20);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c2, vb0123c2));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c2, vb0123c2));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c2, vb4567c2));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c2, vb4567c2));
+        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
+        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
+        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
+        const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
+        const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
+        const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
+
+        const v128_t vb0123c3 = wasm_v128_load(w + 24);
+        const v128_t vb4567c3 = wasm_v128_load(w + 28);
+
+        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
+        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
+        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
+        vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c3, vb0123c3));
+        vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c3, vb0123c3));
+        vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
+        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
+        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
+        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));
+        vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c3, vb4567c3));
+        vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c3, vb4567c3));
+        vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
+
+        w += 32;
+        k -= 4 * sizeof(float);
+      }
+      if XNN_UNLIKELY(k != 0) {
+        do {
+          const v128_t vb0123 = wasm_v128_load(w);
+          const v128_t vb4567 = wasm_v128_load(w + 4);
+          w += 8;
+
+          const v128_t va0 = wasm_v32x4_load_splat(a0);
+          a0 += 1;
+          const v128_t va1 = wasm_v32x4_load_splat(a1);
+          a1 += 1;
+          const v128_t va2 = wasm_v32x4_load_splat(a2);
+          a2 += 1;
+          const v128_t va3 = wasm_v32x4_load_splat(a3);
+          a3 += 1;
+          const v128_t va4 = wasm_v32x4_load_splat(a4);
+          a4 += 1;
+          const v128_t va5 = wasm_v32x4_load_splat(a5);
+          a5 += 1;
+
+          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
+          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
+          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
+          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
+          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
+          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
+          vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
+          vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
+          vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
+          vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
+          vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
+          vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
+          k -= sizeof(float);
+        } while (k != 0);
+      }
+      p -= 6 * sizeof(void*);
+    } while (p != 0);
+
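+    // x86 variant of the clamp: compare + bitselect instead of wasm_f32x4_min/max (the _arm
+    // variants use min/max directly), which tends to lower to a cheaper sequence on x86.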
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
+    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
+    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
+    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
+    vacc4x0123 = wasm_v128_bitselect(vmin, vacc4x0123, wasm_f32x4_lt(vacc4x0123, vmin));
+    vacc5x0123 = wasm_v128_bitselect(vmin, vacc5x0123, wasm_f32x4_lt(vacc5x0123, vmin));
+    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
+    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
+    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
+    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
+    vacc4x4567 = wasm_v128_bitselect(vmin, vacc4x4567, wasm_f32x4_lt(vacc4x4567, vmin));
+    vacc5x4567 = wasm_v128_bitselect(vmin, vacc5x4567, wasm_f32x4_lt(vacc5x4567, vmin));
+
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
+    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
+    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
+    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
+    vacc4x0123 = wasm_v128_bitselect(vacc4x0123, vmax, wasm_f32x4_le(vacc4x0123, vmax));
+    vacc5x0123 = wasm_v128_bitselect(vacc5x0123, vmax, wasm_f32x4_le(vacc5x0123, vmax));
+    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
+    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
+    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
+    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
+    vacc4x4567 = wasm_v128_bitselect(vacc4x4567, vmax, wasm_f32x4_le(vacc4x4567, vmax));
+    vacc5x4567 = wasm_v128_bitselect(vacc5x4567, vmax, wasm_f32x4_le(vacc5x4567, vmax));
+
+    if XNN_LIKELY(nc >= 8) {
+      wasm_v128_store(c5, vacc5x0123);
+      wasm_v128_store(c5 + 4, vacc5x4567);
+      c5 = (float*) ((uintptr_t) c5 + cn_stride);
+      wasm_v128_store(c4, vacc4x0123);
+      wasm_v128_store(c4 + 4, vacc4x4567);
+      c4 = (float*) ((uintptr_t) c4 + cn_stride);
+      wasm_v128_store(c3, vacc3x0123);
+      wasm_v128_store(c3 + 4, vacc3x4567);
+      c3 = (float*) ((uintptr_t) c3 + cn_stride);
+      wasm_v128_store(c2, vacc2x0123);
+      wasm_v128_store(c2 + 4, vacc2x4567);
+      c2 = (float*) ((uintptr_t) c2 + cn_stride);
+      wasm_v128_store(c1, vacc1x0123);
+      wasm_v128_store(c1 + 4, vacc1x4567);
+      c1 = (float*) ((uintptr_t) c1 + cn_stride);
+      wasm_v128_store(c0, vacc0x0123);
+      wasm_v128_store(c0 + 4, vacc0x4567);
+      c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const float**restrict) ((uintptr_t) a - ks);
+      nc -= 8;
+    } else {
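+      // Remainder columns: store 4, then 2, then 1 leftover values, shifting the surviving
+      // lanes down after each partial store.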
+      if (nc & 4) {
+        wasm_v128_store(c5, vacc5x0123);
+        wasm_v128_store(c4, vacc4x0123);
+        wasm_v128_store(c3, vacc3x0123);
+        wasm_v128_store(c2, vacc2x0123);
+        wasm_v128_store(c1, vacc1x0123);
+        wasm_v128_store(c0, vacc0x0123);
+
+        vacc5x0123 = vacc5x4567;
+        vacc4x0123 = vacc4x4567;
+        vacc3x0123 = vacc3x4567;
+        vacc2x0123 = vacc2x4567;
+        vacc1x0123 = vacc1x4567;
+        vacc0x0123 = vacc0x4567;
+
+        c5 += 4;
+        c4 += 4;
+        c3 += 4;
+        c2 += 4;
+        c1 += 4;
+        c0 += 4;
+      }
+      if (nc & 2) {
+        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
+        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
+        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
+        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
+        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
+        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
+
+        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
+        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
+        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
+        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
+        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
+        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
+
+        c5 += 2;
+        c4 += 2;
+        c3 += 2;
+        c2 += 2;
+        c1 += 2;
+        c0 += 2;
+      }
+      if (nc & 1) {
+        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
+        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
+        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
+        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
+        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
+        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/f32-igemm/sse-dup.c.in b/src/f32-igemm/sse-dup.c.in
index 8a0b296..6142c27 100644
--- a/src/f32-igemm/sse-dup.c.in
+++ b/src/f32-igemm/sse-dup.c.in
@@ -5,14 +5,16 @@
 
 $assert NR % 4 == 0
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$SSE_HEADER = {1: "xmmintrin.h", 2: "emmintrin.h"}[SSE]
 #include <assert.h>
 
-#include <xmmintrin.h>
+#include <${SSE_HEADER}>
 
 #include <xnnpack/igemm.h>
 
 
-void xnn_f32_igemm_minmax_ukernel_${MR}x${NR}__sse_dup(
+$ISA = {1: "sse", 2: "sse2"}[SSE]
+void xnn_f32_igemm_minmax_ukernel_${MR}x${NR}__${ISA}_dup(
     size_t mr,
     size_t nc,
     size_t kc,
@@ -83,7 +85,10 @@
           $LLLL = str(L) * 4
 
           $for M in range(MR):
-            const __m128 va${M}c${LLLL} = _mm_shuffle_ps(va${M}, va${M}, _MM_SHUFFLE(${L}, ${L}, ${L}, ${L}));
+            $if SSE >= 2 and L < 3:
+              const __m128 va${M}c${LLLL} = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va${M}), _MM_SHUFFLE(${L}, ${L}, ${L}, ${L})));
+            $else:
+              const __m128 va${M}c${LLLL} = _mm_shuffle_ps(va${M}, va${M}, _MM_SHUFFLE(${L}, ${L}, ${L}, ${L}));
 
           $for N in range(0, NR, 4):
             const __m128 vb${ABC[N:N+4]}c${L} = _mm_load_ps(w + ${L * NR + N});
diff --git a/src/f32-igemm/wasmsimd-loadsplat.c.in b/src/f32-igemm/wasmsimd-loadsplat.c.in
index 41cde8a..d0cd4cc 100644
--- a/src/f32-igemm/wasmsimd-loadsplat.c.in
+++ b/src/f32-igemm/wasmsimd-loadsplat.c.in
@@ -16,7 +16,7 @@
 $ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
 $ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
-void xnn_f32_igemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd_loadsplat${ARCH_SUFFIX}(
+void xnn_f32_igemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd${ARCH_SUFFIX}_loadsplat(
     size_t mr,
     size_t nc,
     size_t kc,
diff --git a/src/f32-igemm/wasmsimd-splat.c.in b/src/f32-igemm/wasmsimd-splat.c.in
index 244eebf..8d3d6a1 100644
--- a/src/f32-igemm/wasmsimd-splat.c.in
+++ b/src/f32-igemm/wasmsimd-splat.c.in
@@ -16,7 +16,7 @@
 $ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
 $ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
 $PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
-void xnn_f32_igemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd_splat${ARCH_SUFFIX}(
+void xnn_f32_igemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__wasmsimd${ARCH_SUFFIX}_splat(
     size_t mr,
     size_t nc,
     size_t kc,
diff --git a/src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-arm.c b/src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-arm.c
deleted file mode 100644
index 43ebba5..0000000
--- a/src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-arm.c
+++ /dev/null
@@ -1,166 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-ppmm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/ppmm.h>
-
-
-void xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm(
-  size_t mr,
-  size_t nc,
-  size_t kc,
-  const float*restrict a,
-  const float*restrict w,
-  float*restrict c,
-  size_t cm_stride,
-  size_t cn_stride,
-  const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    c3 = c2;
-  }
-
-  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0123 = wasm_v128_load(a);
-      a += 4;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      const v128_t va0000 = wasm_v32x4_shuffle(va0123, va0123, 0, 0, 0, 0);
-      const v128_t va1111 = wasm_v32x4_shuffle(va0123, va0123, 1, 1, 1, 1);
-      const v128_t va2222 = wasm_v32x4_shuffle(va0123, va0123, 2, 2, 2, 2);
-      const v128_t va3333 = wasm_v32x4_shuffle(va0123, va0123, 3, 3, 3, 3);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0000, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1111, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2222, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3333, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0000, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1111, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2222, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3333, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
-    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
-    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
-    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
-    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
-    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
-    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
-    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);
-
-    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
-    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
-    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
-    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
-    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
-    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
-    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
-    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-
-      a = (const float*) ((uintptr_t) a - kc * 4);
-
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-x86.c b/src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-x86.c
deleted file mode 100644
index 629e651..0000000
--- a/src/f32-ppmm/gen/4x8-minmax-wasmsimd-splat-x86.c
+++ /dev/null
@@ -1,166 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-ppmm/wasmsimd-splat.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2020 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <wasm_simd128.h>
-
-#include <xnnpack/ppmm.h>
-
-
-void xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86(
-  size_t mr,
-  size_t nc,
-  size_t kc,
-  const float*restrict a,
-  const float*restrict w,
-  float*restrict c,
-  size_t cm_stride,
-  size_t cn_stride,
-  const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
-{
-  assert(mr != 0);
-  assert(mr <= 4);
-  assert(nc != 0);
-  assert(kc != 0);
-  assert(kc % sizeof(float) == 0);
-
-  float* c0 = c;
-  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
-  if XNN_UNPREDICTABLE(mr < 2) {
-    c1 = c0;
-  }
-  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
-  if XNN_UNPREDICTABLE(mr <= 2) {
-    c2 = c1;
-  }
-  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
-  if XNN_UNPREDICTABLE(mr != 4) {
-    c3 = c2;
-  }
-
-  do {
-    v128_t vacc0x0123 = wasm_v128_load(w);
-    v128_t vacc0x4567 = wasm_v128_load(w + 4);
-    v128_t vacc1x0123 = vacc0x0123;
-    v128_t vacc1x4567 = vacc0x4567;
-    v128_t vacc2x0123 = vacc0x0123;
-    v128_t vacc2x4567 = vacc0x4567;
-    v128_t vacc3x0123 = vacc0x0123;
-    v128_t vacc3x4567 = vacc0x4567;
-    w += 8;
-
-    size_t k = kc;
-    do {
-      const v128_t va0123 = wasm_v128_load(a);
-      a += 4;
-
-      const v128_t vb0123 = wasm_v128_load(w);
-      const v128_t vb4567 = wasm_v128_load(w + 4);
-      w += 8;
-
-      const v128_t va0000 = wasm_v32x4_shuffle(va0123, va0123, 0, 0, 0, 0);
-      const v128_t va1111 = wasm_v32x4_shuffle(va0123, va0123, 1, 1, 1, 1);
-      const v128_t va2222 = wasm_v32x4_shuffle(va0123, va0123, 2, 2, 2, 2);
-      const v128_t va3333 = wasm_v32x4_shuffle(va0123, va0123, 3, 3, 3, 3);
-
-      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0000, vb0123));
-      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1111, vb0123));
-      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2222, vb0123));
-      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3333, vb0123));
-      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0000, vb4567));
-      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1111, vb4567));
-      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2222, vb4567));
-      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3333, vb4567));
-
-      k -= sizeof(float);
-    } while (k != 0);
-
-    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
-    vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
-    vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
-    vacc2x0123 = wasm_v128_bitselect(vmin, vacc2x0123, wasm_f32x4_lt(vacc2x0123, vmin));
-    vacc3x0123 = wasm_v128_bitselect(vmin, vacc3x0123, wasm_f32x4_lt(vacc3x0123, vmin));
-    vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
-    vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));
-    vacc2x4567 = wasm_v128_bitselect(vmin, vacc2x4567, wasm_f32x4_lt(vacc2x4567, vmin));
-    vacc3x4567 = wasm_v128_bitselect(vmin, vacc3x4567, wasm_f32x4_lt(vacc3x4567, vmin));
-
-    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
-    vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
-    vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
-    vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vmax, wasm_f32x4_le(vacc2x0123, vmax));
-    vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vmax, wasm_f32x4_le(vacc3x0123, vmax));
-    vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
-    vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));
-    vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vmax, wasm_f32x4_le(vacc2x4567, vmax));
-    vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vmax, wasm_f32x4_le(vacc3x4567, vmax));
-
-    if XNN_LIKELY(nc >= 8) {
-      wasm_v128_store(c3, vacc3x0123);
-      wasm_v128_store(c3 + 4, vacc3x4567);
-      wasm_v128_store(c2, vacc2x0123);
-      wasm_v128_store(c2 + 4, vacc2x4567);
-      wasm_v128_store(c1, vacc1x0123);
-      wasm_v128_store(c1 + 4, vacc1x4567);
-      wasm_v128_store(c0, vacc0x0123);
-      wasm_v128_store(c0 + 4, vacc0x4567);
-
-      a = (const float*) ((uintptr_t) a - kc * 4);
-
-      c3 = (float*) ((uintptr_t) c3 + cn_stride);
-      c2 = (float*) ((uintptr_t) c2 + cn_stride);
-      c1 = (float*) ((uintptr_t) c1 + cn_stride);
-      c0 = (float*) ((uintptr_t) c0 + cn_stride);
-
-      nc -= 8;
-    } else {
-      if (nc & 4) {
-        wasm_v128_store(c3, vacc3x0123);
-        wasm_v128_store(c2, vacc2x0123);
-        wasm_v128_store(c1, vacc1x0123);
-        wasm_v128_store(c0, vacc0x0123);
-
-        vacc3x0123 = vacc3x4567;
-        vacc2x0123 = vacc2x4567;
-        vacc1x0123 = vacc1x4567;
-        vacc0x0123 = vacc0x4567;
-
-        c3 += 4;
-        c2 += 4;
-        c1 += 4;
-        c0 += 4;
-      }
-      if (nc & 2) {
-        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
-        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
-        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
-        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);
-
-        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
-        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
-        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
-        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);
-
-        c3 += 2;
-        c2 += 2;
-        c1 += 2;
-        c0 += 2;
-      }
-      if (nc & 1) {
-        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
-        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
-        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
-        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
-      }
-
-      nc = 0;
-    }
-  } while (nc != 0);
-}
diff --git a/src/f32-ppmm/wasmsimd-splat.c.in b/src/f32-ppmm/wasmsimd-splat.c.in
index 370f4d5..cd62863 100644
--- a/src/f32-ppmm/wasmsimd-splat.c.in
+++ b/src/f32-ppmm/wasmsimd-splat.c.in
@@ -13,7 +13,7 @@
 #include <xnnpack/ppmm.h>
 
 
-void xnn_f32_ppmm_minmax_ukernel_${MR}x${NR}__wasmsimd_splat_${"x86" if X86 else "arm"}(
+void xnn_f32_ppmm_minmax_ukernel_${MR}x${NR}__wasmsimd_${"x86" if X86 else "arm"}_splat(
   size_t mr,
   size_t nc,
   size_t kc,
diff --git a/src/f32-sigmoid/gen/neon-frac-p9-p10-nr1recps-x16.c b/src/f32-sigmoid/gen/neon-frac-p9-p10-nr1recps-x16.c
deleted file mode 100644
index f67bba0..0000000
--- a/src/f32-sigmoid/gen/neon-frac-p9-p10-nr1recps-x16.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// Auto-generated file. Do not edit!
-//   Template: src/f32-sigmoid/neon-frac-p9-p10-nr1recps.c.in
-//   Generator: tools/xngen
-//
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <arm_neon.h>
-
-#include <xnnpack/common.h>
-#include <xnnpack/vunary.h>
-
-
-void xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x16(
-    size_t n,
-    const float* x,
-    float* y,
-    const void* params) XNN_DISABLE_TSAN
-{
-  assert(n % sizeof(float) == 0);
-
-  const float32x4_t vhalf = vmovq_n_f32(0.5f);
-
-  // The coefficients of the numerator polynomial (odd).
-  const float32x4_t valpha_1 = vmovq_n_f32(2.48287947061529e-01);
-  const float32x4_t valpha_3 = vmovq_n_f32(8.51377133304701e-03);
-  const float32x4_t valpha_5 = vmovq_n_f32(6.08574864600143e-05);
-  const float32x4_t valpha_7 = vmovq_n_f32(1.15627324459942e-07);
-  const float32x4_t valpha_9 = vmovq_n_f32(4.37031012579801e-11);
-
-  // The coefficients of the denominator polynomial (even).
-  const float32x4_t vbeta_0 =  vmovq_n_f32(9.93151921023180e-01);
-  const float32x4_t vbeta_2 =  vmovq_n_f32(1.16817656904453e-01);
-  const float32x4_t vbeta_4 =  vmovq_n_f32(1.70198817374094e-03);
-  const float32x4_t vbeta_6 =  vmovq_n_f32(6.29106785017040e-06);
-  const float32x4_t vbeta_8 =  vmovq_n_f32(5.76102136993427e-09);
-  const float32x4_t vbeta_10 = vmovq_n_f32(6.10247389755681e-13);
-
-  // Sigmoid ~saturates outside of this range anyway.
-  const float32x4_t vsigmoid_maxinput = vdupq_n_f32(18.f);
-  const float32x4_t vsigmoid_mininput = vdupq_n_f32(-18.f);
-
-  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
-    float32x4_t vn0123 = vld1q_f32(x); x += 4;
-    float32x4_t vn4567 = vld1q_f32(x); x += 4;
-    float32x4_t vn89AB = vld1q_f32(x); x += 4;
-    float32x4_t vnCDEF = vld1q_f32(x); x += 4;
-
-    // restrict range to avoid overflow, output saturates outside this anyway
-    vn0123 = vminq_f32(vn0123, vsigmoid_maxinput);
-    vn0123 = vmaxq_f32(vn0123, vsigmoid_mininput);
-    vn4567 = vminq_f32(vn4567, vsigmoid_maxinput);
-    vn4567 = vmaxq_f32(vn4567, vsigmoid_mininput);
-    vn89AB = vminq_f32(vn89AB, vsigmoid_maxinput);
-    vn89AB = vmaxq_f32(vn89AB, vsigmoid_mininput);
-    vnCDEF = vminq_f32(vnCDEF, vsigmoid_maxinput);
-    vnCDEF = vmaxq_f32(vnCDEF, vsigmoid_mininput);
-
-    // square the input
-    const float32x4_t vn0123_sq = vmulq_f32(vn0123, vn0123);
-    const float32x4_t vn4567_sq = vmulq_f32(vn4567, vn4567);
-    const float32x4_t vn89AB_sq = vmulq_f32(vn89AB, vn89AB);
-    const float32x4_t vnCDEF_sq = vmulq_f32(vnCDEF, vnCDEF);
-
-    // Evaluate numerator polynomial
-    float32x4_t vnum0123 = vmlaq_f32(valpha_7, vn0123_sq, valpha_9);
-    float32x4_t vnum4567 = vmlaq_f32(valpha_7, vn4567_sq, valpha_9);
-    float32x4_t vnum89AB = vmlaq_f32(valpha_7, vn89AB_sq, valpha_9);
-    float32x4_t vnumCDEF = vmlaq_f32(valpha_7, vnCDEF_sq, valpha_9);
-
-    vnum0123 = vmlaq_f32(valpha_5, vn0123_sq, vnum0123);
-    vnum4567 = vmlaq_f32(valpha_5, vn4567_sq, vnum4567);
-    vnum89AB = vmlaq_f32(valpha_5, vn89AB_sq, vnum89AB);
-    vnumCDEF = vmlaq_f32(valpha_5, vnCDEF_sq, vnumCDEF);
-
-    vnum0123 = vmlaq_f32(valpha_3, vn0123_sq, vnum0123);
-    vnum4567 = vmlaq_f32(valpha_3, vn4567_sq, vnum4567);
-    vnum89AB = vmlaq_f32(valpha_3, vn89AB_sq, vnum89AB);
-    vnumCDEF = vmlaq_f32(valpha_3, vnCDEF_sq, vnumCDEF);
-
-    vnum0123 = vmlaq_f32(valpha_1, vn0123_sq, vnum0123);
-    vnum4567 = vmlaq_f32(valpha_1, vn4567_sq, vnum4567);
-    vnum89AB = vmlaq_f32(valpha_1, vn89AB_sq, vnum89AB);
-    vnumCDEF = vmlaq_f32(valpha_1, vnCDEF_sq, vnumCDEF);
-
-    vnum0123 = vmulq_f32(vn0123, vnum0123);
-    vnum4567 = vmulq_f32(vn4567, vnum4567);
-    vnum89AB = vmulq_f32(vn89AB, vnum89AB);
-    vnumCDEF = vmulq_f32(vnCDEF, vnumCDEF);
-
-    // Evaluate denominator polynomial
-    float32x4_t vdenom0123 = vmlaq_f32(vbeta_8, vn0123_sq, vbeta_10);
-    float32x4_t vdenom4567 = vmlaq_f32(vbeta_8, vn4567_sq, vbeta_10);
-    float32x4_t vdenom89AB = vmlaq_f32(vbeta_8, vn89AB_sq, vbeta_10);
-    float32x4_t vdenomCDEF = vmlaq_f32(vbeta_8, vnCDEF_sq, vbeta_10);
-
-    vdenom0123 = vmlaq_f32(vbeta_6, vn0123_sq, vdenom0123);
-    vdenom4567 = vmlaq_f32(vbeta_6, vn4567_sq, vdenom4567);
-    vdenom89AB = vmlaq_f32(vbeta_6, vn89AB_sq, vdenom89AB);
-    vdenomCDEF = vmlaq_f32(vbeta_6, vnCDEF_sq, vdenomCDEF);
-
-    vdenom0123 = vmlaq_f32(vbeta_4, vn0123_sq, vdenom0123);
-    vdenom4567 = vmlaq_f32(vbeta_4, vn4567_sq, vdenom4567);
-    vdenom89AB = vmlaq_f32(vbeta_4, vn89AB_sq, vdenom89AB);
-    vdenomCDEF = vmlaq_f32(vbeta_4, vnCDEF_sq, vdenomCDEF);
-
-    vdenom0123 = vmlaq_f32(vbeta_2, vn0123_sq, vdenom0123);
-    vdenom4567 = vmlaq_f32(vbeta_2, vn4567_sq, vdenom4567);
-    vdenom89AB = vmlaq_f32(vbeta_2, vn89AB_sq, vdenom89AB);
-    vdenomCDEF = vmlaq_f32(vbeta_2, vnCDEF_sq, vdenomCDEF);
-
-    vdenom0123 = vmlaq_f32(vbeta_0, vn0123_sq, vdenom0123);
-    vdenom4567 = vmlaq_f32(vbeta_0, vn4567_sq, vdenom4567);
-    vdenom89AB = vmlaq_f32(vbeta_0, vn89AB_sq, vdenom89AB);
-    vdenomCDEF = vmlaq_f32(vbeta_0, vnCDEF_sq, vdenomCDEF);
-
-    // Do division 1. / denom
-    float32x4_t vrecp0123 = vrecpeq_f32(vdenom0123);
-    float32x4_t vrecp4567 = vrecpeq_f32(vdenom4567);
-    float32x4_t vrecp89AB = vrecpeq_f32(vdenom89AB);
-    float32x4_t vrecpCDEF = vrecpeq_f32(vdenomCDEF);
-
-    // One NR iteration
-    vrecp0123 = vmulq_f32(vrecp0123, vrecpsq_f32(vrecp0123, vdenom0123));
-    vrecp4567 = vmulq_f32(vrecp4567, vrecpsq_f32(vrecp4567, vdenom4567));
-    vrecp89AB = vmulq_f32(vrecp89AB, vrecpsq_f32(vrecp89AB, vdenom89AB));
-    vrecpCDEF = vmulq_f32(vrecpCDEF, vrecpsq_f32(vrecpCDEF, vdenomCDEF));
-
-
-    // .5 + num * (1. / denom)
-    const float32x4_t vsigmoid0123 = vmlaq_f32(vhalf, vnum0123, vrecp0123);
-    const float32x4_t vsigmoid4567 = vmlaq_f32(vhalf, vnum4567, vrecp4567);
-    const float32x4_t vsigmoid89AB = vmlaq_f32(vhalf, vnum89AB, vrecp89AB);
-    const float32x4_t vsigmoidCDEF = vmlaq_f32(vhalf, vnumCDEF, vrecpCDEF);
-
-
-    vst1q_f32(y, vsigmoid0123); y += 4;
-    vst1q_f32(y, vsigmoid4567); y += 4;
-    vst1q_f32(y, vsigmoid89AB); y += 4;
-    vst1q_f32(y, vsigmoidCDEF); y += 4;
-  }
-  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
-    float32x4_t vn0123 = vld1q_f32(x); x += 4;
-
-    vn0123 = vminq_f32(vn0123, vsigmoid_maxinput);
-    vn0123 = vmaxq_f32(vn0123, vsigmoid_mininput);
-
-    const float32x4_t vn0123_sq = vmulq_f32(vn0123, vn0123);
-
-    // Evaluate numerator polynomial
-    float32x4_t vnum0123 = vmlaq_f32(valpha_7, vn0123_sq, valpha_9);
-
-    vnum0123 = vmlaq_f32(valpha_5, vn0123_sq, vnum0123);
-    vnum0123 = vmlaq_f32(valpha_3, vn0123_sq, vnum0123);
-    vnum0123 = vmlaq_f32(valpha_1, vn0123_sq, vnum0123);
-    vnum0123 = vmulq_f32(vn0123, vnum0123);
-
-    // Evaluate denominator polynomial
-
-    float32x4_t vdenom0123 = vmlaq_f32(vbeta_8, vn0123_sq, vbeta_10);
-    vdenom0123 = vmlaq_f32(vbeta_6, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_4, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_2, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_0, vn0123_sq, vdenom0123);
-
-    // Do division, one NR iteration
-
-    float32x4_t vrecp0123 = vrecpeq_f32(vdenom0123);
-    vrecp0123 = vmulq_f32(vrecp0123, vrecpsq_f32(vrecp0123, vdenom0123));
-
-    const float32x4_t vsigmoid0123 = vmlaq_f32(vhalf, vnum0123, vrecp0123);
-
-    vst1q_f32(y, vsigmoid0123); y += 4;
-  }
-  if XNN_UNLIKELY(n != 0) {
-    float32x4_t vn0123 = vld1q_f32(x);
-
-    vn0123 = vminq_f32(vn0123, vsigmoid_maxinput);
-    vn0123 = vmaxq_f32(vn0123, vsigmoid_mininput);
-
-    const float32x4_t vn0123_sq = vmulq_f32(vn0123, vn0123);
-
-    // Evaluate numerator polynomial
-    float32x4_t vnum0123 = vmlaq_f32(valpha_7, vn0123_sq, valpha_9);
-
-    vnum0123 = vmlaq_f32(valpha_5, vn0123_sq, vnum0123);
-    vnum0123 = vmlaq_f32(valpha_3, vn0123_sq, vnum0123);
-    vnum0123 = vmlaq_f32(valpha_1, vn0123_sq, vnum0123);
-    vnum0123 = vmulq_f32(vn0123, vnum0123);
-
-    // Evaluate denominator polynomial
-
-    float32x4_t vdenom0123 = vmlaq_f32(vbeta_8, vn0123_sq, vbeta_10);
-    vdenom0123 = vmlaq_f32(vbeta_6, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_4, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_2, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_0, vn0123_sq, vdenom0123);
-
-    // Do division, one NR iteration
-
-    float32x4_t vrecp0123 = vrecpeq_f32(vdenom0123);
-    vrecp0123 = vmulq_f32(vrecp0123, vrecpsq_f32(vrecp0123, vdenom0123));
-
-    const float32x4_t vsigmoid0123 = vmlaq_f32(vhalf, vnum0123, vrecp0123);
-
-    float32x2_t vf01 = vget_low_f32(vsigmoid0123);
-    if (n & (2 * sizeof(float))) {
-      vst1_f32(y, vf01); y += 2;
-      vf01 = vget_high_f32(vsigmoid0123);
-    }
-    if (n & (1 * sizeof(float))) {
-      vst1_lane_f32(y, vf01, 0);
-    }
-  }
-}
diff --git a/src/f32-sigmoid/neon-frac-p9-p10-nr1recps.c.in b/src/f32-sigmoid/neon-frac-p9-p10-nr1recps.c.in
deleted file mode 100644
index a8a7755..0000000
--- a/src/f32-sigmoid/neon-frac-p9-p10-nr1recps.c.in
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-$assert BATCH_TILE % 4 == 0
-$assert BATCH_TILE >= 4
-$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-#include <assert.h>
-
-#include <arm_neon.h>
-
-#include <xnnpack/common.h>
-#include <xnnpack/vunary.h>
-
-
-void xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x${BATCH_TILE}(
-    size_t n,
-    const float* x,
-    float* y,
-    const void* params) XNN_DISABLE_TSAN
-{
-  assert(n % sizeof(float) == 0);
-
-  const float32x4_t vhalf = vmovq_n_f32(0.5f);
-
-  // The coefficients of the numerator polynomial (odd).
-  const float32x4_t valpha_1 = vmovq_n_f32(2.48287947061529e-01);
-  const float32x4_t valpha_3 = vmovq_n_f32(8.51377133304701e-03);
-  const float32x4_t valpha_5 = vmovq_n_f32(6.08574864600143e-05);
-  const float32x4_t valpha_7 = vmovq_n_f32(1.15627324459942e-07);
-  const float32x4_t valpha_9 = vmovq_n_f32(4.37031012579801e-11);
-
-  // The coefficients of the denominator polynomial (even).
-  const float32x4_t vbeta_0 =  vmovq_n_f32(9.93151921023180e-01);
-  const float32x4_t vbeta_2 =  vmovq_n_f32(1.16817656904453e-01);
-  const float32x4_t vbeta_4 =  vmovq_n_f32(1.70198817374094e-03);
-  const float32x4_t vbeta_6 =  vmovq_n_f32(6.29106785017040e-06);
-  const float32x4_t vbeta_8 =  vmovq_n_f32(5.76102136993427e-09);
-  const float32x4_t vbeta_10 = vmovq_n_f32(6.10247389755681e-13);
-
-  // Sigmoid ~saturates outside of this range anyway.
-  const float32x4_t vsigmoid_maxinput = vdupq_n_f32(18.f);
-  const float32x4_t vsigmoid_mininput = vdupq_n_f32(-18.f);
-
-  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
-    $for N in range(0, BATCH_TILE, 4):
-      float32x4_t vn${ABC[N:N+4]} = vld1q_f32(x); x += 4;
-
-    // restrict range to avoid overflow, output saturates outside this anyway
-    $for N in range(0, BATCH_TILE, 4):
-      vn${ABC[N:N+4]} = vminq_f32(vn${ABC[N:N+4]}, vsigmoid_maxinput);
-      vn${ABC[N:N+4]} = vmaxq_f32(vn${ABC[N:N+4]}, vsigmoid_mininput);
-
-    // square the input
-    $for N in range(0, BATCH_TILE, 4):
-      const float32x4_t vn${ABC[N:N+4]}_sq = vmulq_f32(vn${ABC[N:N+4]}, vn${ABC[N:N+4]});
-
-    // Evaluate numerator polynomial
-    $for N in range(0, BATCH_TILE, 4):
-      float32x4_t vnum${ABC[N:N+4]} = vmlaq_f32(valpha_7, vn${ABC[N:N+4]}_sq, valpha_9);
-
-    $for N in range(0, BATCH_TILE, 4):
-      vnum${ABC[N:N+4]} = vmlaq_f32(valpha_5, vn${ABC[N:N+4]}_sq, vnum${ABC[N:N+4]});
-
-    $for N in range(0, BATCH_TILE, 4):
-      vnum${ABC[N:N+4]} = vmlaq_f32(valpha_3, vn${ABC[N:N+4]}_sq, vnum${ABC[N:N+4]});
-
-    $for N in range(0, BATCH_TILE, 4):
-      vnum${ABC[N:N+4]} = vmlaq_f32(valpha_1, vn${ABC[N:N+4]}_sq, vnum${ABC[N:N+4]});
-
-    $for N in range(0, BATCH_TILE, 4):
-      vnum${ABC[N:N+4]} = vmulq_f32(vn${ABC[N:N+4]}, vnum${ABC[N:N+4]});
-
-    // Evaluate denominator polynomial
-    $for N in range(0, BATCH_TILE, 4):
-      float32x4_t vdenom${ABC[N:N+4]} = vmlaq_f32(vbeta_8, vn${ABC[N:N+4]}_sq, vbeta_10);
-
-    $for N in range(0, BATCH_TILE, 4):
-      vdenom${ABC[N:N+4]} = vmlaq_f32(vbeta_6, vn${ABC[N:N+4]}_sq, vdenom${ABC[N:N+4]});
-
-    $for N in range(0, BATCH_TILE, 4):
-      vdenom${ABC[N:N+4]} = vmlaq_f32(vbeta_4, vn${ABC[N:N+4]}_sq, vdenom${ABC[N:N+4]});
-
-    $for N in range(0, BATCH_TILE, 4):
-      vdenom${ABC[N:N+4]} = vmlaq_f32(vbeta_2, vn${ABC[N:N+4]}_sq, vdenom${ABC[N:N+4]});
-
-    $for N in range(0, BATCH_TILE, 4):
-      vdenom${ABC[N:N+4]} = vmlaq_f32(vbeta_0, vn${ABC[N:N+4]}_sq, vdenom${ABC[N:N+4]});
-
-    // Do division 1. / denom
-    $for N in range(0, BATCH_TILE, 4):
-      float32x4_t vrecp${ABC[N:N+4]} = vrecpeq_f32(vdenom${ABC[N:N+4]});
-
-    // One NR iteration
-    $for N in range(0, BATCH_TILE, 4):
-      vrecp${ABC[N:N+4]} = vmulq_f32(vrecp${ABC[N:N+4]}, vrecpsq_f32(vrecp${ABC[N:N+4]}, vdenom${ABC[N:N+4]}));
-
-
-    // .5 + num * (1. / denom)
-    $for N in range(0, BATCH_TILE, 4):
-      const float32x4_t vsigmoid${ABC[N:N+4]} = vmlaq_f32(vhalf, vnum${ABC[N:N+4]}, vrecp${ABC[N:N+4]});
-
-
-    $for N in range(0, BATCH_TILE, 4):
-      vst1q_f32(y, vsigmoid${ABC[N:N+4]}); y += 4;
-  }
-  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
-    float32x4_t vn0123 = vld1q_f32(x); x += 4;
-
-    vn0123 = vminq_f32(vn0123, vsigmoid_maxinput);
-    vn0123 = vmaxq_f32(vn0123, vsigmoid_mininput);
-
-    const float32x4_t vn0123_sq = vmulq_f32(vn0123, vn0123);
-
-    // Evaluate numerator polynomial
-    float32x4_t vnum0123 = vmlaq_f32(valpha_7, vn0123_sq, valpha_9);
-
-    vnum0123 = vmlaq_f32(valpha_5, vn0123_sq, vnum0123);
-    vnum0123 = vmlaq_f32(valpha_3, vn0123_sq, vnum0123);
-    vnum0123 = vmlaq_f32(valpha_1, vn0123_sq, vnum0123);
-    vnum0123 = vmulq_f32(vn0123, vnum0123);
-
-    // Evaluate denominator polynomial
-
-    float32x4_t vdenom0123 = vmlaq_f32(vbeta_8, vn0123_sq, vbeta_10);
-    vdenom0123 = vmlaq_f32(vbeta_6, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_4, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_2, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_0, vn0123_sq, vdenom0123);
-
-    // Do division, one NR iteration
-
-    float32x4_t vrecp0123 = vrecpeq_f32(vdenom0123);
-    vrecp0123 = vmulq_f32(vrecp0123, vrecpsq_f32(vrecp0123, vdenom0123));
-
-    const float32x4_t vsigmoid0123 = vmlaq_f32(vhalf, vnum0123, vrecp0123);
-
-    vst1q_f32(y, vsigmoid0123); y += 4;
-  }
-  if XNN_UNLIKELY(n != 0) {
-    float32x4_t vn0123 = vld1q_f32(x);
-
-    vn0123 = vminq_f32(vn0123, vsigmoid_maxinput);
-    vn0123 = vmaxq_f32(vn0123, vsigmoid_mininput);
-
-    const float32x4_t vn0123_sq = vmulq_f32(vn0123, vn0123);
-
-    // Evaluate numerator polynomial
-    float32x4_t vnum0123 = vmlaq_f32(valpha_7, vn0123_sq, valpha_9);
-
-    vnum0123 = vmlaq_f32(valpha_5, vn0123_sq, vnum0123);
-    vnum0123 = vmlaq_f32(valpha_3, vn0123_sq, vnum0123);
-    vnum0123 = vmlaq_f32(valpha_1, vn0123_sq, vnum0123);
-    vnum0123 = vmulq_f32(vn0123, vnum0123);
-
-    // Evaluate denominator polynomial
-
-    float32x4_t vdenom0123 = vmlaq_f32(vbeta_8, vn0123_sq, vbeta_10);
-    vdenom0123 = vmlaq_f32(vbeta_6, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_4, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_2, vn0123_sq, vdenom0123);
-    vdenom0123 = vmlaq_f32(vbeta_0, vn0123_sq, vdenom0123);
-
-    // Do division, one NR iteration
-
-    float32x4_t vrecp0123 = vrecpeq_f32(vdenom0123);
-    vrecp0123 = vmulq_f32(vrecp0123, vrecpsq_f32(vrecp0123, vdenom0123));
-
-    const float32x4_t vsigmoid0123 = vmlaq_f32(vhalf, vnum0123, vrecp0123);
-
-    float32x2_t vf01 = vget_low_f32(vsigmoid0123);
-    if (n & (2 * sizeof(float))) {
-      vst1_f32(y, vf01); y += 2;
-      vf01 = vget_high_f32(vsigmoid0123);
-    }
-    if (n & (1 * sizeof(float))) {
-      vst1_lane_f32(y, vf01, 0);
-    }
-  }
-}
diff --git a/src/f32-spmm/gen/12x1-minmax-neon.c b/src/f32-spmm/gen/12x1-minmax-neon.c
new file mode 100644
index 0000000..47725c5
--- /dev/null
+++ b/src/f32-spmm/gen/12x1-minmax-neon.c
@@ -0,0 +1,187 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_12x1__neon(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 12 * sizeof(float);
+  while XNN_LIKELY(mc >= 12 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+      float32x4_t vacc4567 = vacc0123;
+      float32x4_t vacc89AB = vacc0123;
+      if XNN_LIKELY(nnz != 0) {
+        do {
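+          // Each widx_dmap entry is a byte-offset delta that advances `input` to the block
+          // belonging to the next nonzero weight.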
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          const float32x4_t vi4567 = vld1q_f32(input + 4);
+          const float32x4_t vi89AB = vld1q_f32(input + 8);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vout89AB = vmaxq_f32(vout89AB, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      vst1q_f32(output + 8, vout89AB);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 12;
+    mc -= 12 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 4 * sizeof(float);
+    if (mc & (8 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 8;
+    }
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/16x1-minmax-neon-pipelined.c b/src/f32-spmm/gen/16x1-minmax-neon-pipelined.c
new file mode 100644
index 0000000..679bdb7
--- /dev/null
+++ b/src/f32-spmm/gen/16x1-minmax-neon-pipelined.c
@@ -0,0 +1,208 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon-pipelined.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
+  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    float32x4_t vw = vld1q_dup_f32(w); w += 1;
+    intptr_t diff = *dmap++;
+    float32x4_t vi0123 = vld1q_f32(input);
+    float32x4_t vi4567 = vld1q_f32(input + 4);
+    float32x4_t vi89AB = vld1q_f32(input + 8);
+    float32x4_t viCDEF = vld1q_f32(input + 12);
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vw;
+      float32x4_t vacc4567 = vw;
+      float32x4_t vacc89AB = vw;
+      float32x4_t vaccCDEF = vw;
+      vw = vld1q_dup_f32(w); w += 1;
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          diff = *dmap++;
+          vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vi0123 = vld1q_f32(input);
+          vi4567 = vld1q_f32(input + 4);
+          vi89AB = vld1q_f32(input + 8);
+          viCDEF = vld1q_f32(input + 12);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vout89AB = vmaxq_f32(vout89AB, vmin);
+      voutCDEF = vmaxq_f32(voutCDEF, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      vst1q_f32(output + 8, vout89AB);
+      vst1q_f32(output + 12, voutCDEF);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 16;
+    mc -= 16 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 8 * sizeof(float);
+    if (mc & (8 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vb);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 8;
+    }
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x2_t vb = vld1_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc01 = vmla_f32(vacc01, vi01, vb);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x2_t vb = vld1_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0 = vmla_f32(vacc0, vi0, vb);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/16x1-minmax-neon-x2.c b/src/f32-spmm/gen/16x1-minmax-neon-x2.c
new file mode 100644
index 0000000..91a1299
--- /dev/null
+++ b/src/f32-spmm/gen/16x1-minmax-neon-x2.c
@@ -0,0 +1,234 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_16x1__neon_x2(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
+  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
+      float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
+      float32x4_t vacc4567x0 = vacc0123x0;
+      float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
+      float32x4_t vacc89ABx0 = vacc0123x0;
+      float32x4_t vacc89ABx1 = vmovq_n_f32(0.0f);
+      float32x4_t vaccCDEFx0 = vacc0123x0;
+      float32x4_t vaccCDEFx1 = vmovq_n_f32(0.0f);
+      for (; nnz >= 2; nnz -= 2) {
+        const intptr_t diff0 = dmap[0];
+        const intptr_t diff1 = dmap[1];
+        dmap += 2;
+        const float32x4_t vi0123x0 = vld1q_f32(input);
+        const float32x4_t vi4567x0 = vld1q_f32(input + 4);
+        const float32x4_t vi89ABx0 = vld1q_f32(input + 8);
+        const float32x4_t viCDEFx0 = vld1q_f32(input + 12);
+        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
+        __builtin_prefetch(input + 16);
+        const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
+        __builtin_prefetch(w + 32);
+        vacc0123x0 = vmlaq_f32(vacc0123x0, vi0123x0, vw0);
+        vacc4567x0 = vmlaq_f32(vacc4567x0, vi4567x0, vw0);
+        vacc89ABx0 = vmlaq_f32(vacc89ABx0, vi89ABx0, vw0);
+        vaccCDEFx0 = vmlaq_f32(vaccCDEFx0, viCDEFx0, vw0);
+        const float32x4_t vi0123x1 = vld1q_f32(input);
+        const float32x4_t vi4567x1 = vld1q_f32(input + 4);
+        const float32x4_t vi89ABx1 = vld1q_f32(input + 8);
+        const float32x4_t viCDEFx1 = vld1q_f32(input + 12);
+        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
+        __builtin_prefetch(input + 16);
+        const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
+        __builtin_prefetch(w + 32);
+        vacc0123x1 = vmlaq_f32(vacc0123x1, vi0123x1, vw1);
+        vacc4567x1 = vmlaq_f32(vacc4567x1, vi4567x1, vw1);
+        vacc89ABx1 = vmlaq_f32(vacc89ABx1, vi89ABx1, vw1);
+        vaccCDEFx1 = vmlaq_f32(vaccCDEFx1, viCDEFx1, vw1);
+      }
+      float32x4_t vacc0123 = vacc0123x0;
+      float32x4_t vacc4567 = vacc4567x0;
+      float32x4_t vacc89AB = vacc89ABx0;
+      float32x4_t vaccCDEF = vaccCDEFx0;
+      vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
+      vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
+      vacc89AB = vaddq_f32(vacc89AB, vacc89ABx1);
+      vaccCDEF = vaddq_f32(vaccCDEF, vaccCDEFx1);
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          const float32x4_t vi4567 = vld1q_f32(input + 4);
+          const float32x4_t vi89AB = vld1q_f32(input + 8);
+          const float32x4_t viCDEF = vld1q_f32(input + 12);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vout89AB = vmaxq_f32(vout89AB, vmin);
+      voutCDEF = vmaxq_f32(voutCDEF, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      vst1q_f32(output + 8, vout89AB);
+      vst1q_f32(output + 12, voutCDEF);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 16;
+    mc -= 16 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 8 * sizeof(float);
+    if (mc & (8 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 8;
+    }
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/16x1-minmax-neon.c b/src/f32-spmm/gen/16x1-minmax-neon.c
new file mode 100644
index 0000000..d991a31
--- /dev/null
+++ b/src/f32-spmm/gen/16x1-minmax-neon.c
@@ -0,0 +1,193 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_16x1__neon(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
+  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+      float32x4_t vacc4567 = vacc0123;
+      float32x4_t vacc89AB = vacc0123;
+      float32x4_t vaccCDEF = vacc0123;
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          const float32x4_t vi4567 = vld1q_f32(input + 4);
+          const float32x4_t vi89AB = vld1q_f32(input + 8);
+          const float32x4_t viCDEF = vld1q_f32(input + 12);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vout89AB = vmaxq_f32(vout89AB, vmin);
+      voutCDEF = vmaxq_f32(voutCDEF, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      vst1q_f32(output + 8, vout89AB);
+      vst1q_f32(output + 12, voutCDEF);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 16;
+    mc -= 16 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 8 * sizeof(float);
+    if (mc & (8 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 8;
+    }
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/32x1-minmax-neon-pipelined.c b/src/f32-spmm/gen/32x1-minmax-neon-pipelined.c
new file mode 100644
index 0000000..f9a7686
--- /dev/null
+++ b/src/f32-spmm/gen/32x1-minmax-neon-pipelined.c
@@ -0,0 +1,288 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon-pipelined.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 32 * sizeof(float);
+  while XNN_LIKELY(mc >= 32 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    float32x4_t vw = vld1q_dup_f32(w); w += 1;
+    intptr_t diff = *dmap++;
+    float32x4_t vi0123 = vld1q_f32(input);
+    float32x4_t vi4567 = vld1q_f32(input + 4);
+    float32x4_t vi89AB = vld1q_f32(input + 8);
+    float32x4_t viCDEF = vld1q_f32(input + 12);
+    float32x4_t viGHIJ = vld1q_f32(input + 16);
+    float32x4_t viKLMN = vld1q_f32(input + 20);
+    float32x4_t viOPQR = vld1q_f32(input + 24);
+    float32x4_t viSTUV = vld1q_f32(input + 28);
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vw;
+      float32x4_t vacc4567 = vw;
+      float32x4_t vacc89AB = vw;
+      float32x4_t vaccCDEF = vw;
+      float32x4_t vaccGHIJ = vw;
+      float32x4_t vaccKLMN = vw;
+      float32x4_t vaccOPQR = vw;
+      float32x4_t vaccSTUV = vw;
+      vw = vld1q_dup_f32(w); w += 1;
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
+          vaccGHIJ = vmlaq_f32(vaccGHIJ, viGHIJ, vw);
+          vaccKLMN = vmlaq_f32(vaccKLMN, viKLMN, vw);
+          vaccOPQR = vmlaq_f32(vaccOPQR, viOPQR, vw);
+          vaccSTUV = vmlaq_f32(vaccSTUV, viSTUV, vw);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          __builtin_prefetch(input + 32);
+          diff = *dmap++;
+          vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vi0123 = vld1q_f32(input);
+          vi4567 = vld1q_f32(input + 4);
+          vi89AB = vld1q_f32(input + 8);
+          viCDEF = vld1q_f32(input + 12);
+          viGHIJ = vld1q_f32(input + 16);
+          viKLMN = vld1q_f32(input + 20);
+          viOPQR = vld1q_f32(input + 24);
+          viSTUV = vld1q_f32(input + 28);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+      float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
+      float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
+      float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
+      float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vout89AB = vmaxq_f32(vout89AB, vmin);
+      voutCDEF = vmaxq_f32(voutCDEF, vmin);
+      voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
+      voutKLMN = vmaxq_f32(voutKLMN, vmin);
+      voutOPQR = vmaxq_f32(voutOPQR, vmin);
+      voutSTUV = vmaxq_f32(voutSTUV, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      vst1q_f32(output + 8, vout89AB);
+      vst1q_f32(output + 12, voutCDEF);
+      vst1q_f32(output + 16, voutGHIJ);
+      vst1q_f32(output + 20, voutKLMN);
+      vst1q_f32(output + 24, voutOPQR);
+      vst1q_f32(output + 28, voutSTUV);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 32;
+    mc -= 32 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 16 * sizeof(float);
+    if (mc & (16 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        float32x4_t vacc89AB = vacc0123;
+        float32x4_t vaccCDEF = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            const float32x4_t vi89AB = vld1q_f32(input + 8);
+            const float32x4_t viCDEF = vld1q_f32(input + 12);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            __builtin_prefetch(input + 32);
+            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vb);
+            vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vb);
+            vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vb);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+        float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vout89AB = vmaxq_f32(vout89AB, vmin);
+        voutCDEF = vmaxq_f32(voutCDEF, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        vst1q_f32(output + 8, vout89AB);
+        vst1q_f32(output + 12, voutCDEF);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 16;
+    }
+    output_decrement += 8 * sizeof(float);
+    if (mc & (8 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            __builtin_prefetch(input + 32);
+            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vb);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 8;
+    }
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            __builtin_prefetch(input + 32);
+            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            __builtin_prefetch(input + 32);
+            const float32x2_t vb = vld1_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc01 = vmla_f32(vacc01, vi01, vb);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            __builtin_prefetch(input + 32);
+            const float32x2_t vb = vld1_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0 = vmla_f32(vacc0, vi0, vb);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/32x1-minmax-neon-x2.c b/src/f32-spmm/gen/32x1-minmax-neon-x2.c
new file mode 100644
index 0000000..91bedb1
--- /dev/null
+++ b/src/f32-spmm/gen/32x1-minmax-neon-x2.c
@@ -0,0 +1,333 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_32x1__neon_x2(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 32 * sizeof(float);
+  while XNN_LIKELY(mc >= 32 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
+      float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
+      float32x4_t vacc4567x0 = vacc0123x0;
+      float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
+      float32x4_t vacc89ABx0 = vacc0123x0;
+      float32x4_t vacc89ABx1 = vmovq_n_f32(0.0f);
+      float32x4_t vaccCDEFx0 = vacc0123x0;
+      float32x4_t vaccCDEFx1 = vmovq_n_f32(0.0f);
+      float32x4_t vaccGHIJx0 = vacc0123x0;
+      float32x4_t vaccGHIJx1 = vmovq_n_f32(0.0f);
+      float32x4_t vaccKLMNx0 = vacc0123x0;
+      float32x4_t vaccKLMNx1 = vmovq_n_f32(0.0f);
+      float32x4_t vaccOPQRx0 = vacc0123x0;
+      float32x4_t vaccOPQRx1 = vmovq_n_f32(0.0f);
+      float32x4_t vaccSTUVx0 = vacc0123x0;
+      float32x4_t vaccSTUVx1 = vmovq_n_f32(0.0f);
+      for (; nnz >= 2; nnz -= 2) {
+        const intptr_t diff0 = dmap[0];
+        const intptr_t diff1 = dmap[1];
+        dmap += 2;
+        const float32x4_t vi0123x0 = vld1q_f32(input);
+        const float32x4_t vi4567x0 = vld1q_f32(input + 4);
+        const float32x4_t vi89ABx0 = vld1q_f32(input + 8);
+        const float32x4_t viCDEFx0 = vld1q_f32(input + 12);
+        const float32x4_t viGHIJx0 = vld1q_f32(input + 16);
+        const float32x4_t viKLMNx0 = vld1q_f32(input + 20);
+        const float32x4_t viOPQRx0 = vld1q_f32(input + 24);
+        const float32x4_t viSTUVx0 = vld1q_f32(input + 28);
+        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
+        __builtin_prefetch(input + 16);
+        __builtin_prefetch(input + 32);
+        const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
+        __builtin_prefetch(w + 32);
+        vacc0123x0 = vmlaq_f32(vacc0123x0, vi0123x0, vw0);
+        vacc4567x0 = vmlaq_f32(vacc4567x0, vi4567x0, vw0);
+        vacc89ABx0 = vmlaq_f32(vacc89ABx0, vi89ABx0, vw0);
+        vaccCDEFx0 = vmlaq_f32(vaccCDEFx0, viCDEFx0, vw0);
+        vaccGHIJx0 = vmlaq_f32(vaccGHIJx0, viGHIJx0, vw0);
+        vaccKLMNx0 = vmlaq_f32(vaccKLMNx0, viKLMNx0, vw0);
+        vaccOPQRx0 = vmlaq_f32(vaccOPQRx0, viOPQRx0, vw0);
+        vaccSTUVx0 = vmlaq_f32(vaccSTUVx0, viSTUVx0, vw0);
+        const float32x4_t vi0123x1 = vld1q_f32(input);
+        const float32x4_t vi4567x1 = vld1q_f32(input + 4);
+        const float32x4_t vi89ABx1 = vld1q_f32(input + 8);
+        const float32x4_t viCDEFx1 = vld1q_f32(input + 12);
+        const float32x4_t viGHIJx1 = vld1q_f32(input + 16);
+        const float32x4_t viKLMNx1 = vld1q_f32(input + 20);
+        const float32x4_t viOPQRx1 = vld1q_f32(input + 24);
+        const float32x4_t viSTUVx1 = vld1q_f32(input + 28);
+        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
+        __builtin_prefetch(input + 16);
+        __builtin_prefetch(input + 32);
+        const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
+        __builtin_prefetch(w + 32);
+        vacc0123x1 = vmlaq_f32(vacc0123x1, vi0123x1, vw1);
+        vacc4567x1 = vmlaq_f32(vacc4567x1, vi4567x1, vw1);
+        vacc89ABx1 = vmlaq_f32(vacc89ABx1, vi89ABx1, vw1);
+        vaccCDEFx1 = vmlaq_f32(vaccCDEFx1, viCDEFx1, vw1);
+        vaccGHIJx1 = vmlaq_f32(vaccGHIJx1, viGHIJx1, vw1);
+        vaccKLMNx1 = vmlaq_f32(vaccKLMNx1, viKLMNx1, vw1);
+        vaccOPQRx1 = vmlaq_f32(vaccOPQRx1, viOPQRx1, vw1);
+        vaccSTUVx1 = vmlaq_f32(vaccSTUVx1, viSTUVx1, vw1);
+      }
+      float32x4_t vacc0123 = vacc0123x0;
+      float32x4_t vacc4567 = vacc4567x0;
+      float32x4_t vacc89AB = vacc89ABx0;
+      float32x4_t vaccCDEF = vaccCDEFx0;
+      float32x4_t vaccGHIJ = vaccGHIJx0;
+      float32x4_t vaccKLMN = vaccKLMNx0;
+      float32x4_t vaccOPQR = vaccOPQRx0;
+      float32x4_t vaccSTUV = vaccSTUVx0;
+      vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
+      vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
+      vacc89AB = vaddq_f32(vacc89AB, vacc89ABx1);
+      vaccCDEF = vaddq_f32(vaccCDEF, vaccCDEFx1);
+      vaccGHIJ = vaddq_f32(vaccGHIJ, vaccGHIJx1);
+      vaccKLMN = vaddq_f32(vaccKLMN, vaccKLMNx1);
+      vaccOPQR = vaddq_f32(vaccOPQR, vaccOPQRx1);
+      vaccSTUV = vaddq_f32(vaccSTUV, vaccSTUVx1);
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          const float32x4_t vi4567 = vld1q_f32(input + 4);
+          const float32x4_t vi89AB = vld1q_f32(input + 8);
+          const float32x4_t viCDEF = vld1q_f32(input + 12);
+          const float32x4_t viGHIJ = vld1q_f32(input + 16);
+          const float32x4_t viKLMN = vld1q_f32(input + 20);
+          const float32x4_t viOPQR = vld1q_f32(input + 24);
+          const float32x4_t viSTUV = vld1q_f32(input + 28);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          __builtin_prefetch(input + 32);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
+          vaccGHIJ = vmlaq_f32(vaccGHIJ, viGHIJ, vw);
+          vaccKLMN = vmlaq_f32(vaccKLMN, viKLMN, vw);
+          vaccOPQR = vmlaq_f32(vaccOPQR, viOPQR, vw);
+          vaccSTUV = vmlaq_f32(vaccSTUV, viSTUV, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+      float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
+      float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
+      float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
+      float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vout89AB = vmaxq_f32(vout89AB, vmin);
+      voutCDEF = vmaxq_f32(voutCDEF, vmin);
+      voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
+      voutKLMN = vmaxq_f32(voutKLMN, vmin);
+      voutOPQR = vmaxq_f32(voutOPQR, vmin);
+      voutSTUV = vmaxq_f32(voutSTUV, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      vst1q_f32(output + 8, vout89AB);
+      vst1q_f32(output + 12, voutCDEF);
+      vst1q_f32(output + 16, voutGHIJ);
+      vst1q_f32(output + 20, voutKLMN);
+      vst1q_f32(output + 24, voutOPQR);
+      vst1q_f32(output + 28, voutSTUV);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 32;
+    mc -= 32 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 16 * sizeof(float);
+    if (mc & (16 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        float32x4_t vacc89AB = vacc0123;
+        float32x4_t vaccCDEF = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            const float32x4_t vi89AB = vld1q_f32(input + 8);
+            const float32x4_t viCDEF = vld1q_f32(input + 12);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+            vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+            vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+        float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vout89AB = vmaxq_f32(vout89AB, vmin);
+        voutCDEF = vmaxq_f32(voutCDEF, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        vst1q_f32(output + 8, vout89AB);
+        vst1q_f32(output + 12, voutCDEF);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 16;
+    }
+    output_decrement += 8 * sizeof(float);
+    if (mc & (8 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 8;
+    }
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/32x1-minmax-neon.c b/src/f32-spmm/gen/32x1-minmax-neon.c
new file mode 100644
index 0000000..ae1263e
--- /dev/null
+++ b/src/f32-spmm/gen/32x1-minmax-neon.c
@@ -0,0 +1,262 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_32x1__neon(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 32 * sizeof(float);
+  while XNN_LIKELY(mc >= 32 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+      float32x4_t vacc4567 = vacc0123;
+      float32x4_t vacc89AB = vacc0123;
+      float32x4_t vaccCDEF = vacc0123;
+      float32x4_t vaccGHIJ = vacc0123;
+      float32x4_t vaccKLMN = vacc0123;
+      float32x4_t vaccOPQR = vacc0123;
+      float32x4_t vaccSTUV = vacc0123;
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          const float32x4_t vi4567 = vld1q_f32(input + 4);
+          const float32x4_t vi89AB = vld1q_f32(input + 8);
+          const float32x4_t viCDEF = vld1q_f32(input + 12);
+          const float32x4_t viGHIJ = vld1q_f32(input + 16);
+          const float32x4_t viKLMN = vld1q_f32(input + 20);
+          const float32x4_t viOPQR = vld1q_f32(input + 24);
+          const float32x4_t viSTUV = vld1q_f32(input + 28);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          __builtin_prefetch(input + 32);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
+          vaccGHIJ = vmlaq_f32(vaccGHIJ, viGHIJ, vw);
+          vaccKLMN = vmlaq_f32(vaccKLMN, viKLMN, vw);
+          vaccOPQR = vmlaq_f32(vaccOPQR, viOPQR, vw);
+          vaccSTUV = vmlaq_f32(vaccSTUV, viSTUV, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+      float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
+      float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
+      float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
+      float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vout89AB = vmaxq_f32(vout89AB, vmin);
+      voutCDEF = vmaxq_f32(voutCDEF, vmin);
+      voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
+      voutKLMN = vmaxq_f32(voutKLMN, vmin);
+      voutOPQR = vmaxq_f32(voutOPQR, vmin);
+      voutSTUV = vmaxq_f32(voutSTUV, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      vst1q_f32(output + 8, vout89AB);
+      vst1q_f32(output + 12, voutCDEF);
+      vst1q_f32(output + 16, voutGHIJ);
+      vst1q_f32(output + 20, voutKLMN);
+      vst1q_f32(output + 24, voutOPQR);
+      vst1q_f32(output + 28, voutSTUV);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 32;
+    mc -= 32 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
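+    // Remainder path: any mc tail is processed in power-of-two column blocks of 16, 8, 4, 2
+    // and 1 floats, re-running the full nc loop for each block and widening output_decrement
+    // so the output pointer rewinds to the correct column offset.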
+    output_decrement += 16 * sizeof(float);
+    if (mc & (16 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        float32x4_t vacc89AB = vacc0123;
+        float32x4_t vaccCDEF = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            const float32x4_t vi89AB = vld1q_f32(input + 8);
+            const float32x4_t viCDEF = vld1q_f32(input + 12);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+            vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
+            vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
+        float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vout89AB = vmaxq_f32(vout89AB, vmin);
+        voutCDEF = vmaxq_f32(voutCDEF, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        vst1q_f32(output + 8, vout89AB);
+        vst1q_f32(output + 12, voutCDEF);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 16;
+    }
+    output_decrement += 8 * sizeof(float);
+    if (mc & (8 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        float32x4_t vacc4567 = vacc0123;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            const float32x4_t vi4567 = vld1q_f32(input + 4);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vout4567 = vmaxq_f32(vout4567, vmin);
+        vst1q_f32(output, vout0123);
+        vst1q_f32(output + 4, vout4567);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 8;
+    }
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/4x1-minmax-neon-pipelined.c b/src/f32-spmm/gen/4x1-minmax-neon-pipelined.c
new file mode 100644
index 0000000..bceee8b
--- /dev/null
+++ b/src/f32-spmm/gen/4x1-minmax-neon-pipelined.c
@@ -0,0 +1,125 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon-pipelined.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 4 * sizeof(float);
+  while XNN_LIKELY(mc >= 4 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
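+    // Pipelined variant: the first weight, input-pointer delta and input vector are preloaded
+    // here so each inner-loop iteration can issue its multiply-accumulate while the loads for
+    // the next nonzero are already in flight.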
+    float32x4_t vw = vld1q_dup_f32(w); w += 1;
+    intptr_t diff = *dmap++;
+    float32x4_t vi0123 = vld1q_f32(input);
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vw;
+      vw = vld1q_dup_f32(w); w += 1;
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          diff = *dmap++;
+          vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vi0123 = vld1q_f32(input);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vst1q_f32(output, vout0123);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 4;
+    mc -= 4 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x2_t vb = vld1_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc01 = vmla_f32(vacc01, vi01, vb);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x2_t vb = vld1_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0 = vmla_f32(vacc0, vi0, vb);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/4x1-minmax-neon-x2.c b/src/f32-spmm/gen/4x1-minmax-neon-x2.c
new file mode 100644
index 0000000..1b5dc5d
--- /dev/null
+++ b/src/f32-spmm/gen/4x1-minmax-neon-x2.c
@@ -0,0 +1,137 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_4x1__neon_x2(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 4 * sizeof(float);
+  while XNN_LIKELY(mc >= 4 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
+      float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
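+      // x2 unrolling: two independent accumulators shorten the multiply-accumulate dependency
+      // chain over pairs of nonzeros; they are summed below before the scalar remainder loop.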
+      for (; nnz >= 2; nnz -= 2) {
+        const intptr_t diff0 = dmap[0];
+        const intptr_t diff1 = dmap[1];
+        dmap += 2;
+        const float32x4_t vi0123x0 = vld1q_f32(input);
+        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
+        __builtin_prefetch(input + 16);
+        const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
+        __builtin_prefetch(w + 32);
+        vacc0123x0 = vmlaq_f32(vacc0123x0, vi0123x0, vw0);
+        const float32x4_t vi0123x1 = vld1q_f32(input);
+        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
+        __builtin_prefetch(input + 16);
+        const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
+        __builtin_prefetch(w + 32);
+        vacc0123x1 = vmlaq_f32(vacc0123x1, vi0123x1, vw1);
+      }
+      float32x4_t vacc0123 = vacc0123x0;
+      vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vst1q_f32(output, vout0123);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 4;
+    mc -= 4 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/4x1-minmax-neon.c b/src/f32-spmm/gen/4x1-minmax-neon.c
new file mode 100644
index 0000000..94de2a3
--- /dev/null
+++ b/src/f32-spmm/gen/4x1-minmax-neon.c
@@ -0,0 +1,117 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_4x1__neon(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 4 * sizeof(float);
+  while XNN_LIKELY(mc >= 4 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vst1q_f32(output, vout0123);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 4;
+    mc -= 4 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/8x1-minmax-neon-pipelined.c b/src/f32-spmm/gen/8x1-minmax-neon-pipelined.c
new file mode 100644
index 0000000..ac3784b
--- /dev/null
+++ b/src/f32-spmm/gen/8x1-minmax-neon-pipelined.c
@@ -0,0 +1,160 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon-pipelined.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 8 * sizeof(float);
+  while XNN_LIKELY(mc >= 8 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    float32x4_t vw = vld1q_dup_f32(w); w += 1;
+    intptr_t diff = *dmap++;
+    float32x4_t vi0123 = vld1q_f32(input);
+    float32x4_t vi4567 = vld1q_f32(input + 4);
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vw;
+      float32x4_t vacc4567 = vw;
+      vw = vld1q_dup_f32(w); w += 1;
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          diff = *dmap++;
+          vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vi0123 = vld1q_f32(input);
+          vi4567 = vld1q_f32(input + 4);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 8;
+    mc -= 8 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x2_t vb = vld1_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc01 = vmla_f32(vacc01, vi01, vb);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            __builtin_prefetch(input + 16);
+            const float32x2_t vb = vld1_dup_f32(w); w += 1;
+            __builtin_prefetch(w + 32);
+            vacc0 = vmla_f32(vacc0, vi0, vb);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/8x1-minmax-neon-x2.c b/src/f32-spmm/gen/8x1-minmax-neon-x2.c
new file mode 100644
index 0000000..f1466b4
--- /dev/null
+++ b/src/f32-spmm/gen/8x1-minmax-neon-x2.c
@@ -0,0 +1,176 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_8x1__neon_x2(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 8 * sizeof(float);
+  while XNN_LIKELY(mc >= 8 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
+      float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
+      float32x4_t vacc4567x0 = vacc0123x0;
+      float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
+      for (; nnz >= 2; nnz -= 2) {
+        const intptr_t diff0 = dmap[0];
+        const intptr_t diff1 = dmap[1];
+        dmap += 2;
+        const float32x4_t vi0123x0 = vld1q_f32(input);
+        const float32x4_t vi4567x0 = vld1q_f32(input + 4);
+        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
+        __builtin_prefetch(input + 16);
+        const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
+        __builtin_prefetch(w + 32);
+        vacc0123x0 = vmlaq_f32(vacc0123x0, vi0123x0, vw0);
+        vacc4567x0 = vmlaq_f32(vacc4567x0, vi4567x0, vw0);
+        const float32x4_t vi0123x1 = vld1q_f32(input);
+        const float32x4_t vi4567x1 = vld1q_f32(input + 4);
+        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
+        __builtin_prefetch(input + 16);
+        const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
+        __builtin_prefetch(w + 32);
+        vacc0123x1 = vmlaq_f32(vacc0123x1, vi0123x1, vw1);
+        vacc4567x1 = vmlaq_f32(vacc4567x1, vi4567x1, vw1);
+      }
+      float32x4_t vacc0123 = vacc0123x0;
+      float32x4_t vacc4567 = vacc4567x0;
+      vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
+      vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          const float32x4_t vi4567 = vld1q_f32(input + 4);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 8;
+    mc -= 8 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/gen/8x1-minmax-neon.c b/src/f32-spmm/gen/8x1-minmax-neon.c
new file mode 100644
index 0000000..5b1fb54
--- /dev/null
+++ b/src/f32-spmm/gen/8x1-minmax-neon.c
@@ -0,0 +1,149 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-spmm/neon.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/spmm.h>
+
+
+void xnn_f32_spmm_minmax_ukernel_8x1__neon(
+    size_t mc,
+    size_t nc,
+    const float*restrict input,
+    const float*restrict weights,
+    const int32_t*restrict widx_dmap,
+    const uint32_t*restrict nidx_nnzmap,
+    float*restrict output,
+    size_t output_stride,
+    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(mc != 0);
+  assert(mc % sizeof(float) == 0);
+  assert(nc != 0);
+
+  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
+  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
+  size_t output_decrement = output_stride * nc - 8 * sizeof(float);
+  while XNN_LIKELY(mc >= 8 * sizeof(float)) {
+    const float*restrict w = weights;
+    const int32_t* dmap = widx_dmap;
+    const uint32_t* nnzmap = nidx_nnzmap;
+    size_t n = nc;
+    do {
+      uint32_t nnz = *nnzmap++;
+      float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+      float32x4_t vacc4567 = vacc0123;
+      if XNN_LIKELY(nnz != 0) {
+        do {
+          const intptr_t diff = *dmap++;
+          const float32x4_t vi0123 = vld1q_f32(input);
+          const float32x4_t vi4567 = vld1q_f32(input + 4);
+          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+          __builtin_prefetch(input + 16);
+          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+          __builtin_prefetch(w + 32);
+          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
+        } while (--nnz != 0);
+      }
+      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
+      vout0123 = vmaxq_f32(vout0123, vmin);
+      vout4567 = vmaxq_f32(vout4567, vmin);
+      vst1q_f32(output, vout0123);
+      vst1q_f32(output + 4, vout4567);
+      output = (float*restrict) ((uintptr_t) output + output_stride);
+    } while (--n != 0);
+    output = (float*restrict) ((uintptr_t) output - output_decrement);
+    input += 8;
+    mc -= 8 * sizeof(float);
+  }
+  if XNN_UNLIKELY(mc != 0) {
+    output_decrement += 4 * sizeof(float);
+    if (mc & (4 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x4_t vi0123 = vld1q_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
+            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
+          } while (--nnz != 0);
+        }
+        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
+        vout0123 = vmaxq_f32(vout0123, vmin);
+        vst1q_f32(output, vout0123);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 4;
+    }
+    output_decrement += 2 * sizeof(float);
+    if (mc & (2 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi01 = vld1_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc01 = vmla_f32(vacc01, vi01, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
+        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
+        vst1_f32(output, vout01);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 2;
+    }
+    output_decrement += 1 * sizeof(float);
+    if (mc & (1 * sizeof(float))) {
+      const float*restrict w = weights;
+      const int32_t* dmap = widx_dmap;
+      const uint32_t* nnzmap = nidx_nnzmap;
+      size_t n = nc;
+      do {
+        uint32_t nnz = *nnzmap++;
+        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
+        if XNN_LIKELY(nnz != 0) {
+          do {
+            const intptr_t diff = *dmap++;
+            const float32x2_t vi0 = vld1_dup_f32(input);
+            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
+            const float32x2_t vw = vld1_dup_f32(w); w += 1;
+            vacc0 = vmla_f32(vacc0, vi0, vw);
+          } while (--nnz != 0);
+        }
+        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
+        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
+        vst1_lane_f32(output, vout0, 0);
+        output = (float*restrict) ((uintptr_t) output + output_stride);
+      } while (--n != 0);
+      output = (float*restrict) ((uintptr_t) output - output_decrement);
+      input += 1;
+    }
+  }
+}
diff --git a/src/f32-spmm/neon-pipelined.c.in b/src/f32-spmm/neon-pipelined.c.in
index cb7b948..344aff2 100644
--- a/src/f32-spmm/neon-pipelined.c.in
+++ b/src/f32-spmm/neon-pipelined.c.in
@@ -5,6 +5,8 @@
 
 $assert MR % 4 == 0
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$VMULADD_F32 = "vfma_f32" if FMA else "vmla_f32"
+$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
 #include <assert.h>
 
 #include <arm_neon.h>
@@ -48,7 +50,7 @@
       if XNN_LIKELY(nnz != 0) {
         do {
           $for M in range(0, MR, 4):
-            vacc${ABC[M:M+4]} = vfmaq_f32(vacc${ABC[M:M+4]}, vi${ABC[M:M+4]}, vw);
+            vacc${ABC[M:M+4]} = ${VMULADDQ_F32}(vacc${ABC[M:M+4]}, vi${ABC[M:M+4]}, vw);
           input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
           $for M in range(0, MR, 16):
             __builtin_prefetch(input + ${M+16});
@@ -113,10 +115,10 @@
                 const float32x4_t vb = vld1q_dup_f32(w); w += 1;
               __builtin_prefetch(w + 32);
               $if SUBMR <= 2:
-                vacc${ABC[0:SUBMR]} = vfma_f32(vacc${ABC[0:SUBMR]}, vi${ABC[0:SUBMR]}, vb);
+                vacc${ABC[0:SUBMR]} = ${VMULADD_F32}(vacc${ABC[0:SUBMR]}, vi${ABC[0:SUBMR]}, vb);
               $else:
                 $for M in range(0, SUBMR, 4):
-                  vacc${ABC[M:M+4]} = vfmaq_f32(vacc${ABC[M:M+4]}, vi${ABC[M:M+4]}, vb);
+                  vacc${ABC[M:M+4]} = ${VMULADDQ_F32}(vacc${ABC[M:M+4]}, vi${ABC[M:M+4]}, vb);
             } while (--nnz != 0);
           }
           $if SUBMR <= 2:
diff --git a/src/f32-spmm/neon.c.in b/src/f32-spmm/neon.c.in
index 280c15a..6fafd71 100644
--- a/src/f32-spmm/neon.c.in
+++ b/src/f32-spmm/neon.c.in
@@ -5,6 +5,8 @@
 
 $assert MR % 4 == 0
 $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$VMULADD_F32 = "vfma_f32" if FMA else "vmla_f32"
+$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
 #include <assert.h>
 
 #include <arm_neon.h>
@@ -59,7 +61,7 @@
             const float32x4_t vw${K} = vld1q_dup_f32(w); w += 1;
             __builtin_prefetch(w + 32);
             $for M in range(0, MR, 4):
-              vacc${ABC[M:M+4]}x${K} = vfmaq_f32(vacc${ABC[M:M+4]}x${K}, vi${ABC[M:M+4]}x${K}, vw${K});
+              vacc${ABC[M:M+4]}x${K} = ${VMULADDQ_F32}(vacc${ABC[M:M+4]}x${K}, vi${ABC[M:M+4]}x${K}, vw${K});
         }
         $for M in range(0, MR, 4):
           float32x4_t vacc${ABC[M:M+4]} = vacc${ABC[M:M+4]}x0;
@@ -82,7 +84,7 @@
           const float32x4_t vw = vld1q_dup_f32(w); w += 1;
           __builtin_prefetch(w + 32);
           $for M in range(0, MR, 4):
-            vacc${ABC[M:M+4]} = vfmaq_f32(vacc${ABC[M:M+4]}, vi${ABC[M:M+4]}, vw);
+            vacc${ABC[M:M+4]} = ${VMULADDQ_F32}(vacc${ABC[M:M+4]}, vi${ABC[M:M+4]}, vw);
         } while (--nnz != 0);
       }
       $for M in range(0, MR, 4):
@@ -135,10 +137,10 @@
               $else:
                 const float32x4_t vw = vld1q_dup_f32(w); w += 1;
               $if SUBMR <= 2:
-                vacc${ABC[0:SUBMR]} = vfma_f32(vacc${ABC[0:SUBMR]}, vi${ABC[0:SUBMR]}, vw);
+                vacc${ABC[0:SUBMR]} = ${VMULADD_F32}(vacc${ABC[0:SUBMR]}, vi${ABC[0:SUBMR]}, vw);
               $else:
                 $for M in range(0, SUBMR, 4):
-                  vacc${ABC[M:M+4]} = vfmaq_f32(vacc${ABC[M:M+4]}, vi${ABC[M:M+4]}, vw);
+                  vacc${ABC[M:M+4]} = ${VMULADDQ_F32}(vacc${ABC[M:M+4]}, vi${ABC[M:M+4]}, vw);
             } while (--nnz != 0);
           }
           $if SUBMR <= 2:
diff --git a/src/f32-velu/avx-rr2-lut16-p3.c.in b/src/f32-velu/avx-rr2-lut16-p3.c.in
new file mode 100644
index 0000000..ae85e89
--- /dev/null
+++ b/src/f32-velu/avx-rr2-lut16-p3.c.in
@@ -0,0 +1,269 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
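+// Tail-mask trick: loading 8 consecutive int32 lanes starting at &mask_table[7] - n/sizeof(float)
+// yields -1 in the first n/sizeof(float) lanes and 0 in the rest, which _mm256_maskload_ps uses
+// below to read only the valid tail elements.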
+
+void xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0xF));
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
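+  // Approximation scheme (as reconstructed from the constants and arithmetic below): the scaled
+  // input z is range-reduced with log2(e) and a two-term ln2 split, 2^frac is taken from the
+  // 16-entry table xnn_table_exp2minus_k_over_16, and exp(z) - 1 is evaluated as
+  // s * (t + c2*t^2 + c3*t^3) + (s - 1), with s the reconstructed power of two.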
+  $if BATCH_TILE > 8:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m256 vx0 = _mm256_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m256 vn${N} = _mm256_add_ps(_mm256_mul_ps(vz${N}, vlog2e), vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        const __m256 vidx${N} = _mm256_and_ps(vn${N}, vindex_mask);
+
+        const __m128i vidx${N}_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx${N})), 2);
+        const __m128i vidx${N}_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx${N}, 1)), 2);
+        #if XNN_ARCH_X86_64
+          const uint64_t vidx${N}_ll = (uint64_t) _mm_cvtsi128_si64(vidx${N}_lo);
+          const uint64_t vidx${N}_lh = (uint64_t) _mm_extract_epi64(vidx${N}_lo, 1);
+          const uint64_t vidx${N}_hl = (uint64_t) _mm_cvtsi128_si64(vidx${N}_hi);
+          const uint64_t vidx${N}_hh = (uint64_t) _mm_extract_epi64(vidx${N}_hi, 1);
+          __m128i vl${N}_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${N}_ll));
+          __m128i vl${N}_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${N}_lh));
+          __m128i vl${N}_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${N}_hl));
+          __m128i vl${N}_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${N}_hh));
+          vl${N}_ll = _mm_insert_epi32(vl${N}_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${N}_ll >> 32))), 1);
+          vl${N}_lh = _mm_insert_epi32(vl${N}_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${N}_lh >> 32))), 1);
+          vl${N}_hl = _mm_insert_epi32(vl${N}_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${N}_hl >> 32))), 1);
+          vl${N}_hh = _mm_insert_epi32(vl${N}_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${N}_hh >> 32))), 1);
+        #else
+          __m128i vl${N}_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx${N}_lo)));
+          __m128i vl${N}_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx${N}_lo, 2)));
+          __m128i vl${N}_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx${N}_hi)));
+          __m128i vl${N}_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx${N}_hi, 2)));
+          vl${N}_ll = _mm_insert_epi32(vl${N}_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx${N}_lo, 1))), 1);
+          vl${N}_lh = _mm_insert_epi32(vl${N}_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx${N}_lo, 3))), 1);
+          vl${N}_hl = _mm_insert_epi32(vl${N}_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx${N}_hi, 1))), 1);
+          vl${N}_hh = _mm_insert_epi32(vl${N}_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx${N}_hi, 3))), 1);
+        #endif
+        const __m128i vl${N}_lo = _mm_unpacklo_epi64(vl${N}_ll, vl${N}_lh);
+        const __m128i vl${N}_hi = _mm_unpacklo_epi64(vl${N}_hl, vl${N}_hh);
+
+      $for N in range(SIMD_TILE):
+        const __m128i ven${N}_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn${N})), 19);
+        const __m128i ven${N}_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn${N}, 1)), 19);
+        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
+        const __m128 vs${N}_lo = _mm_castsi128_ps(_mm_add_epi32(vl${N}_lo, ven${N}_lo));
+        const __m128 vs${N}_hi = _mm_castsi128_ps(_mm_add_epi32(vl${N}_hi, ven${N}_hi));
+
+      $for N in range(SIMD_TILE):
+        __m256 vt${N} = _mm256_add_ps(_mm256_mul_ps(vn${N}, vminus_ln2_hi), vz${N});
+
+      $for N in range(SIMD_TILE):
+        vt${N} = _mm256_add_ps(_mm256_mul_ps(vn${N}, vminus_ln2_lo), vt${N});
+        __m256 vs${N} = _mm256_insertf128_ps(_mm256_castps128_ps256(vs${N}_lo), vs${N}_hi, 1);
+
+      $for N in range(SIMD_TILE):
+        __m256 vp${N} = _mm256_add_ps(_mm256_mul_ps(vc3, vt${N}), vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
+
+      $for N in range(SIMD_TILE):
+        vt${N} = _mm256_mul_ps(vt${N}, vs${N});
+        vs${N} = _mm256_sub_ps(vs${N}, vone);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vt${N});
+
+      $for N in range(SIMD_TILE):
+        const __m256 ve${N} = _mm256_mul_ps(_mm256_add_ps(vp${N}, vs${N}), valpha);
+        vx${N} = _mm256_mul_ps(vx${N}, vbeta);
+
+      $for N in range(SIMD_TILE):
+        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});
+
+      _mm256_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm256_storeu_ps(y + ${N * 8}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
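
Note: every f32-velu kernel added in this change computes the extended ELU y = beta*x for x >= 0 and y = alpha*(exp(prescale*x) - 1) for x < 0; the variants only differ in how exp is approximated and how the remainder is handled. A minimal scalar oracle for spot-checking the vector outputs, assuming flat float parameters in place of the kernels' xnn_f32_elu_params union (sketch only, not part of the imported sources, helper name hypothetical):

  #include <math.h>
  #include <stddef.h>

  // Scalar reference with the same branch selection as the vector kernels:
  // the exponential branch is taken whenever the sign bit of the input is set.
  static void f32_elu_reference(size_t n /* bytes, multiple of sizeof(float) */,
                                const float* x, float* y,
                                float prescale, float alpha, float beta) {
    for (size_t i = 0; i < n / sizeof(float); i++) {
      const float vx = x[i];
      y[i] = signbit(vx) ? alpha * expm1f(vx * prescale) : vx * beta;
    }
  }
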
diff --git a/src/f32-velu/avx-rr2-lut4-p4-perm.c.in b/src/f32-velu/avx-rr2-lut4-p4-perm.c.in
new file mode 100644
index 0000000..48eba99
--- /dev/null
+++ b/src/f32-velu/avx-rr2-lut4-p4-perm.c.in
@@ -0,0 +1,185 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
+  const __m256 vtable = _mm256_set_ps(
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  $if BATCH_TILE > 8:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m256 vx0 = _mm256_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m256 vn${N} = _mm256_add_ps(_mm256_mul_ps(vz${N}, vlog2e), vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m256 ven${N} = _mm256_andnot_ps(vindex_mask, vn${N});
+        const __m256 vl${N} = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn${N}));
+        const __m128 ven${N}_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven${N})), 21));
+
+      $for N in range(SIMD_TILE):
+        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
+        const __m128 ven${N}_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven${N}, 1)), 21));
+
+      $for N in range(SIMD_TILE):
+        __m256 vt${N} = _mm256_add_ps(_mm256_mul_ps(vn${N}, vminus_ln2_hi), vz${N});
+        ven${N} = _mm256_insertf128_ps(_mm256_castps128_ps256(ven${N}_lo), ven${N}_hi, 1);
+
+      $for N in range(SIMD_TILE):
+        vt${N} = _mm256_add_ps(_mm256_mul_ps(vn${N}, vminus_ln2_lo), vt${N});
+        __m256 vs${N} = _mm256_mul_ps(vl${N}, ven${N});
+
+      $for N in range(SIMD_TILE):
+        __m256 vp${N} = _mm256_add_ps(_mm256_mul_ps(vc4, vt${N}), vc3);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
+
+      $for N in range(SIMD_TILE):
+        vt${N} = _mm256_mul_ps(vt${N}, vs${N});
+        vs${N} = _mm256_sub_ps(vs${N}, vone);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vt${N});
+
+      $for N in range(SIMD_TILE):
+        const __m256 ve${N} = _mm256_mul_ps(_mm256_add_ps(vp${N}, vs${N}), valpha);
+        vx${N} = _mm256_mul_ps(vx${N}, vbeta);
+
+      $for N in range(SIMD_TILE):
+        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});
+
+      _mm256_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm256_storeu_ps(y + ${N * 8}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
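
Note: the template above rounds z*log2(e) with the magic-bias trick: adding a bias of the form 1.5*2^21 plus a pre-scaled exponent offset (0x1.8003F8p21f here) makes the hardware round to a multiple of 1/4 and leaves that fixed-point value in the low mantissa bits, so the low 2 bits serve directly as the 4-entry LUT index while the bits above them, shifted left by 21, rebuild the exponent of 2^floor(n). A standalone scalar sketch of the same rounding trick, shown with the 2^23-scale bias that the p6 kernels use (illustration only; the printed values are what this particular input produces):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  int main(void) {
    // 1.5*2^23 plus 127 (the IEEE exponent bias), as in the p6 kernels.
    const float magic_bias = 0x1.8000FEp23f;
    const float v = 3.7f;
    const float n = v + magic_bias;  // rounds v to the nearest integer
    uint32_t bits;
    memcpy(&bits, &n, sizeof(bits));
    // Prints: round(3.7) = 4, low bits = 131 (131 = 4 + 127, the biased exponent of 2^4).
    printf("round(%g) = %g, low bits = %u\n",
           (double) v, (double) (n - magic_bias), (unsigned) (bits & 0xFF));
    return 0;
  }
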
diff --git a/src/f32-velu/avx-rr2-p6.c.in b/src/f32-velu/avx-rr2-p6.c.in
new file mode 100644
index 0000000..5610c7f
--- /dev/null
+++ b/src/f32-velu/avx-rr2-p6.c.in
@@ -0,0 +1,182 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_p6_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E440p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.0105C6p-21f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  $if BATCH_TILE > 8:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m256 vx0 = _mm256_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m256 vn${N} = _mm256_add_ps(_mm256_mul_ps(vz${N}, vlog2e), vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        const __m128 vs${N}_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn${N})), 23));
+        const __m128 vs${N}_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn${N}, 1)), 23));
+        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m256 vt${N} = _mm256_add_ps(_mm256_mul_ps(vn${N}, vminus_ln2_hi), vz${N});
+        __m256 vs${N} = _mm256_insertf128_ps(_mm256_castps128_ps256(vs${N}_lo), vs${N}_hi, 1);
+
+      $for N in range(SIMD_TILE):
+        vt${N} = _mm256_add_ps(_mm256_mul_ps(vn${N}, vminus_ln2_lo), vt${N});
+
+      $for N in range(SIMD_TILE):
+        __m256 vp${N} = _mm256_add_ps(_mm256_mul_ps(vc6, vt${N}), vc5);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vc4);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vc3);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
+
+      $for N in range(SIMD_TILE):
+        vt${N} = _mm256_mul_ps(vt${N}, vs${N});
+        vs${N} = _mm256_sub_ps(vs${N}, vone);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_add_ps(_mm256_mul_ps(vp${N}, vt${N}), vt${N});
+
+      $for N in range(SIMD_TILE):
+        const __m256 ve${N} = _mm256_mul_ps(_mm256_add_ps(vp${N}, vs${N}), valpha);
+        vx${N} = _mm256_mul_ps(vx${N}, vbeta);
+
+      $for N in range(SIMD_TILE):
+        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});
+
+      _mm256_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm256_storeu_ps(y + ${N * 8}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
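
Note: the RR2-P6 path above reduces z to n*ln2 + t with s = 2^n and uses exp(z) - 1 = s*exp(t) - 1 = s*expm1(t) + (s - 1), where expm1(t) is approximated by t + t^2*(c2 + t*(c3 + t*(c4 + t*(c5 + t*c6)))). A scalar mirror of the same arithmetic can help when reading the vector code (sketch only: it assumes z was already clamped to the saturation cutoff, as vz is in the kernels, and the helper name is hypothetical):

  #include <stdint.h>
  #include <string.h>

  static float expm1_rr2_p6(float z) {
    const float magic_bias = 0x1.8000FEp23f;
    float n = z * 0x1.715476p+0f + magic_bias;  // n ~ round(z / ln2), kept biased
    uint32_t nbits;
    memcpy(&nbits, &n, sizeof(nbits));
    const uint32_t sbits = nbits << 23;         // exponent bias is baked into magic_bias
    float s;                                    // s = 2^round(z / ln2)
    memcpy(&s, &sbits, sizeof(s));
    n -= magic_bias;
    float t = n * -0x1.62E440p-1f + z;          // t = z - n*ln2, high part
    t = n * 0x1.0105C6p-21f + t;                //                low part
    float p = 0x1.6b7338p-10f * t + 0x1.12278Ep-7f;
    p = p * t + 0x1.555716p-5f;
    p = p * t + 0x1.5554B0p-3f;
    p = p * t + 0x1.FFFFFEp-2f;
    p = p * t;                                  // t*(c2 + t*(c3 + ...))
    t = t * s;
    return (p * t + t) + (s - 1.0f);            // s*expm1(t) + (s - 1)
  }

The negative ELU branch in the kernel is alpha times this quantity, evaluated at z = prescale*x.
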
diff --git a/src/f32-velu/avx2-rr1-lut16-p3-gather.c.in b/src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
new file mode 100644
index 0000000..0023d07
--- /dev/null
+++ b/src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
@@ -0,0 +1,165 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  $if BATCH_TILE > 8:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m256 vx0 = _mm256_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m256 vn${N} = _mm256_fmadd_ps(vz${N}, vlog2e, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        const __m256i vidx${N} = _mm256_and_si256(_mm256_castps_si256(vn${N}), vindex_mask);
+        const __m256i vl${N} = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx${N}, sizeof(float));
+
+      $for N in range(SIMD_TILE):
+        const __m256i ven${N} = _mm256_slli_epi32(_mm256_castps_si256(vn${N}), 19);
+        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m256 vs${N} = _mm256_castsi256_ps(_mm256_add_epi32(vl${N}, ven${N}));
+        __m256 vt${N} = _mm256_fmadd_ps(vn${N}, vminus_ln2, vz${N});
+
+      $for N in range(SIMD_TILE):
+        __m256 vp${N} = _mm256_fmadd_ps(vc3, vt${N}, vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
+        vt${N} = _mm256_mul_ps(vt${N}, vs${N});
+
+      $for N in range(SIMD_TILE):
+        vs${N} = _mm256_fmsub_ps(vs${N}, valpha, valpha);
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vt${N});
+
+      $for N in range(SIMD_TILE):
+        const __m256 ve${N} = _mm256_fmadd_ps(vp${N}, valpha, vs${N});
+        vx${N} = _mm256_mul_ps(vx${N}, vbeta);
+
+      $for N in range(SIMD_TILE):
+        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});
+
+      _mm256_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm256_storeu_ps(y + ${N * 8}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
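
Note: the gather variant above replaces the per-element scalar table loads of the AVX kernels with _mm256_i32gather_epi32, which fetches one 32-bit entry per lane from base + index*scale. A self-contained demonstration against a made-up table (illustration only; requires AVX2):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    static const int table[16] = {0, 10, 20, 30, 40, 50, 60, 70,
                                  80, 90, 100, 110, 120, 130, 140, 150};
    const __m256i idx = _mm256_set_epi32(15, 9, 7, 3, 12, 8, 4, 0);
    int out[8];
    _mm256_storeu_si256((__m256i*) out, _mm256_i32gather_epi32(table, idx, sizeof(int)));
    // Prints: 0 40 80 120 30 70 90 150
    for (int i = 0; i < 8; i++) printf("%d ", out[i]);
    printf("\n");
    return 0;
  }
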
diff --git a/src/f32-velu/avx2-rr1-lut4-p4-perm.c.in b/src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
new file mode 100644
index 0000000..3a60243
--- /dev/null
+++ b/src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
@@ -0,0 +1,164 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  $if BATCH_TILE > 8:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m256 vx0 = _mm256_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m256 vn${N} = _mm256_fmadd_ps(vz${N}, vlog2e, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        const __m256i ven${N} = _mm256_slli_epi32(_mm256_castps_si256(vn${N}), 21);
+        const __m256i vl${N} = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn${N})));
+        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m256 vs${N} = _mm256_castsi256_ps(_mm256_add_epi32(vl${N}, ven${N}));
+        __m256 vt${N} = _mm256_fmadd_ps(vn${N}, vminus_ln2, vz${N});
+
+      $for N in range(SIMD_TILE):
+        __m256 vp${N} = _mm256_fmadd_ps(vc4, vt${N}, vc3);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
+        vt${N} = _mm256_mul_ps(vt${N}, vs${N});
+
+      $for N in range(SIMD_TILE):
+        vs${N} = _mm256_fmsub_ps(vs${N}, valpha, valpha);
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vt${N});
+
+      $for N in range(SIMD_TILE):
+        const __m256 ve${N} = _mm256_fmadd_ps(vp${N}, valpha, vs${N});
+        vx${N} = _mm256_mul_ps(vx${N}, vbeta);
+
+      $for N in range(SIMD_TILE):
+        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});
+
+      _mm256_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm256_storeu_ps(y + ${N * 8}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
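
Note: _mm256_permutevar_ps, used above for the 4-entry lookup, selects within each 128-bit lane using only the low 2 bits of each 32-bit index, which is why vtable carries the same four entries in both halves of the register. A standalone demonstration (illustration only; requires AVX):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    const __m256 table = _mm256_set_ps(3.0f, 2.0f, 1.0f, 0.0f,
                                       3.0f, 2.0f, 1.0f, 0.0f);
    const __m256i idx = _mm256_set_epi32(0, 1, 2, 3, 3, 2, 1, 0);
    float out[8];
    _mm256_storeu_ps(out, _mm256_permutevar_ps(table, idx));
    // Prints: 0 1 2 3 3 2 1 0
    for (int i = 0; i < 8; i++) printf("%.0f ", out[i]);
    printf("\n");
    return 0;
  }
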
diff --git a/src/f32-velu/avx2-rr1-lut8-p4-perm.c.in b/src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
new file mode 100644
index 0000000..9e4f100
--- /dev/null
+++ b/src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
@@ -0,0 +1,163 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  $if BATCH_TILE > 8:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m256 vx0 = _mm256_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m256 vn${N} = _mm256_fmadd_ps(vz${N}, vlog2e, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        const __m256i ven${N} = _mm256_slli_epi32(_mm256_castps_si256(vn${N}), 20);
+        const __m256i vl${N} = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn${N}));
+        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m256 vs${N} = _mm256_castsi256_ps(_mm256_add_epi32(vl${N}, ven${N}));
+        __m256 vt${N} = _mm256_fmadd_ps(vn${N}, vminus_ln2, vz${N});
+
+      $for N in range(SIMD_TILE):
+        __m256 vp${N} = _mm256_fmadd_ps(vc4, vt${N}, vc3);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
+        vt${N} = _mm256_mul_ps(vt${N}, vs${N});
+
+      $for N in range(SIMD_TILE):
+        vs${N} = _mm256_fmsub_ps(vs${N}, valpha, valpha);
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vt${N});
+
+      $for N in range(SIMD_TILE):
+        const __m256 ve${N} = _mm256_fmadd_ps(vp${N}, valpha, vs${N});
+        vx${N} = _mm256_mul_ps(vx${N}, vbeta);
+
+      $for N in range(SIMD_TILE):
+        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});
+
+      _mm256_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm256_storeu_ps(y + ${N * 8}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
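
Note: unlike _mm256_permutevar_ps, _mm256_permutevar8x32_epi32 permutes across the full 256-bit register using the low 3 bits of each index, so the 8-entry table above does not have to be replicated per 128-bit lane. A standalone demonstration (illustration only; requires AVX2):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    const __m256i table = _mm256_set_epi32(70, 60, 50, 40, 30, 20, 10, 0);
    const __m256i idx = _mm256_set_epi32(0, 2, 4, 6, 7, 5, 3, 1);
    int out[8];
    _mm256_storeu_si256((__m256i*) out, _mm256_permutevar8x32_epi32(table, idx));
    // Prints: 10 30 50 70 60 40 20 0
    for (int i = 0; i < 8; i++) printf("%d ", out[i]);
    printf("\n");
    return 0;
  }
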
diff --git a/src/f32-velu/avx2-rr1-p6.c.in b/src/f32-velu/avx2-rr1-p6.c.in
new file mode 100644
index 0000000..3a6fb29
--- /dev/null
+++ b/src/f32-velu/avx2-rr1-p6.c.in
@@ -0,0 +1,167 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  $if BATCH_TILE > 8:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m256 vx0 = _mm256_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m256 vn${N} = _mm256_fmadd_ps(vz${N}, vlog2e, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m256 vs${N} = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn${N}), 23));
+        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m256 vt${N} = _mm256_fmadd_ps(vn${N}, vminus_ln2, vz${N});
+
+      $for N in range(SIMD_TILE):
+        __m256 vp${N} = _mm256_fmadd_ps(vc6, vt${N}, vc5);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc4);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc3);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
+        vt${N} = _mm256_mul_ps(vt${N}, vs${N});
+
+      $for N in range(SIMD_TILE):
+        vs${N} = _mm256_fmsub_ps(vs${N}, valpha, valpha);
+        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vt${N});
+
+      $for N in range(SIMD_TILE):
+        const __m256 ve${N} = _mm256_fmadd_ps(vp${N}, valpha, vs${N});
+        vx${N} = _mm256_mul_ps(vx${N}, vbeta);
+
+      $for N in range(SIMD_TILE):
+        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});
+
+      _mm256_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm256_storeu_ps(y + ${N * 8}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
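
Note: the two ELU branches are merged without an explicit comparison: _mm256_blendv_ps takes each element from its second operand whenever the sign bit of the corresponding mask element is set, so the kernels keep the exponential result wherever the (beta-scaled) input has its sign bit set, including for -0.0f. A standalone demonstration with stand-in branch values (illustration only; requires AVX):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    const __m256 vx = _mm256_set_ps(3.0f, -2.0f, 1.0f, -0.5f, 0.0f, -0.0f, 7.0f, -8.0f);
    const __m256 vneg = _mm256_set1_ps(-1.0f);  // stand-in for the exponential branch
    const __m256 vpos = _mm256_set1_ps(+1.0f);  // stand-in for the linear branch
    float out[8];
    _mm256_storeu_ps(out, _mm256_blendv_ps(vpos, vneg, vx));
    // Prints: -1 +1 -1 +1 -1 +1 -1 +1
    for (int i = 0; i < 8; i++) printf("%+.0f ", out[i]);
    printf("\n");
    return 0;
  }
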
diff --git a/src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in b/src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
new file mode 100644
index 0000000..1ada953
--- /dev/null
+++ b/src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
@@ -0,0 +1,151 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 16 == 0
+$assert BATCH_TILE >= 16
+$SIMD_TILE = BATCH_TILE // 16
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  $if BATCH_TILE > 16:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m512 vx0 = _mm512_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m512 vx${N} = _mm512_loadu_ps(x + ${N * 16});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m512 vz${N} = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m512 vn${N} = _mm512_fmadd_ps(vz${N}, vlog2e, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        const __m512i ven${N} = _mm512_slli_epi32(_mm512_castps_si512(vn${N}), 19);
+        const __m512i vl${N} = _mm512_permutexvar_epi32(_mm512_castps_si512(vn${N}), vtable);
+
+      $for N in range(SIMD_TILE):
+        __m512 vs${N} = _mm512_castsi512_ps(_mm512_add_epi32(vl${N}, ven${N}));
+        vn${N} = _mm512_sub_ps(vn${N}, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m512 vt${N} = _mm512_fmadd_ps(vn${N}, vminus_ln2, vz${N});
+
+      $for N in range(SIMD_TILE):
+        __m512 vp${N} = _mm512_fmadd_ps(vc3, vt${N}, vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm512_mul_ps(vp${N}, vt${N});
+        vt${N} = _mm512_mul_ps(vt${N}, vs${N});
+
+      $for N in range(SIMD_TILE):
+        vs${N} = _mm512_fmsub_ps(vs${N}, valpha, valpha);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vt${N});
+
+      const __m512 vzero = _mm512_setzero_ps();
+      $for N in range(SIMD_TILE):
+        __m512 vy${N} = _mm512_fmadd_ps(vp${N}, valpha, vs${N});
+        const __mmask16 vsign${N} = _mm512_cmp_ps_mask(vx${N}, vzero, _CMP_NLT_US);
+
+      $for N in range(SIMD_TILE):
+        vy${N} = _mm512_mask_mul_ps(vy${N}, vsign${N}, vx${N}, vbeta);
+
+      _mm512_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm512_storeu_ps(y + ${N * 16}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
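
Note: the AVX-512 kernels above handle the remainder with a k-mask built as (1 << count) - 1 instead of the mask_table pointer arithmetic used by the AVX/AVX2 kernels, so the partial load and store are single masked instructions. A standalone sketch of the same pattern (illustration only; requires AVX512F, and _cvtu32_mask16 may need a recent compiler or a polyfill such as the xnnpack/intrinsics-polyfill.h header these kernels include):

  #include <immintrin.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    const float src[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
    float dst[16] = {0};
    const uint32_t count = 5;  // pretend 5 trailing floats remain
    const __mmask16 mask = _cvtu32_mask16((UINT32_C(1) << count) - UINT32_C(1));
    const __m512 v = _mm512_maskz_loadu_ps(mask, src);  // lanes >= count read as zero
    _mm512_mask_storeu_ps(dst, mask, _mm512_mul_ps(v, _mm512_set1_ps(2.0f)));
    // Prints: 2 4 6 8 10 0 0 0 0 0 0 0 0 0 0 0
    for (int i = 0; i < 16; i++) printf("%.0f ", dst[i]);
    printf("\n");
    return 0;
  }
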
diff --git a/src/f32-velu/avx512f-rr1-p6.c.in b/src/f32-velu/avx512f-rr1-p6.c.in
new file mode 100644
index 0000000..8d20525
--- /dev/null
+++ b/src/f32-velu/avx512f-rr1-p6.c.in
@@ -0,0 +1,158 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 16 == 0
+$assert BATCH_TILE >= 16
+$SIMD_TILE = BATCH_TILE // 16
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
+  $if BATCH_TILE > 16:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m512 vx0 = _mm512_loadu_ps(x);
+      $for N in range(1, SIMD_TILE):
+        __m512 vx${N} = _mm512_loadu_ps(x + ${N * 16});
+      x += ${BATCH_TILE};
+
+      $for N in range(SIMD_TILE):
+        const __m512 vz${N} = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx${N}, vprescale));
+
+      $for N in range(SIMD_TILE):
+        __m512 vn${N} = _mm512_fmadd_ps(vz${N}, vlog2e, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m512 vs${N} = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn${N}), 23));
+        vn${N} = _mm512_sub_ps(vn${N}, vmagic_bias);
+
+      $for N in range(SIMD_TILE):
+        __m512 vt${N} = _mm512_fmadd_ps(vn${N}, vminus_ln2, vz${N});
+
+      $for N in range(SIMD_TILE):
+        __m512 vp${N} = _mm512_fmadd_ps(vc6, vt${N}, vc5);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vc4);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vc3);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vc2);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm512_mul_ps(vp${N}, vt${N});
+        vt${N} = _mm512_mul_ps(vt${N}, vs${N});
+
+      $for N in range(SIMD_TILE):
+        vs${N} = _mm512_fmsub_ps(vs${N}, valpha, valpha);
+
+      $for N in range(SIMD_TILE):
+        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vt${N});
+
+      const __m512 vzero = _mm512_setzero_ps();
+      $for N in range(SIMD_TILE):
+        __m512 vy${N} = _mm512_fmadd_ps(vp${N}, valpha, vs${N});
+        const __mmask16 vsign${N} = _mm512_cmp_ps_mask(vx${N}, vzero, _CMP_NLT_US);
+
+      $for N in range(SIMD_TILE):
+        vy${N} = _mm512_mask_mul_ps(vy${N}, vsign${N}, vx${N}, vbeta);
+
+      _mm512_storeu_ps(y, vy0);
+      $for N in range(1, SIMD_TILE):
+        _mm512_storeu_ps(y + ${N * 16}, vy${N});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
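
Note: in the AVX-512 kernels the positive branch is applied last: _mm512_cmp_ps_mask with _CMP_NLT_US sets a mask bit where x is not less than zero (x >= 0, or NaN), and _mm512_mask_mul_ps then overwrites only those lanes with beta*x, leaving alpha*(exp(prescale*x) - 1) in the remaining lanes. A standalone demonstration with stand-in values (illustration only; requires AVX512F):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    const __m512 vx = _mm512_set_ps(8, -7, 6, -5, 4, -3, 2, -1,
                                    -8, 7, -6, 5, -4, 3, -2, 1);
    const __m512 vbeta = _mm512_set1_ps(10.0f);
    const __m512 velu = _mm512_set1_ps(-0.5f);  // stand-in for alpha*expm1(prescale*x)
    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
    float out[16];
    _mm512_storeu_ps(out, _mm512_mask_mul_ps(velu, vsign, vx, vbeta));
    // Non-negative lanes print 10*x, negative lanes print -0.5.
    for (int i = 0; i < 16; i++) printf("%.1f ", out[i]);
    printf("\n");
    return 0;
  }
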
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x16.c b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x16.c
new file mode 100644
index 0000000..4cd93a8
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x16.c
@@ -0,0 +1,302 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0xF));
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    x += 16;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+
+    const __m256 vidx0 = _mm256_and_ps(vn0, vindex_mask);
+
+    const __m128i vidx0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx0)), 2);
+    const __m128i vidx0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx0, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx0_ll = (uint64_t) _mm_cvtsi128_si64(vidx0_lo);
+      const uint64_t vidx0_lh = (uint64_t) _mm_extract_epi64(vidx0_lo, 1);
+      const uint64_t vidx0_hl = (uint64_t) _mm_cvtsi128_si64(vidx0_hi);
+      const uint64_t vidx0_hh = (uint64_t) _mm_extract_epi64(vidx0_hi, 1);
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_ll));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_lh));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hl));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hh));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_ll >> 32))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_lh >> 32))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hl >> 32))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hh >> 32))), 1);
+    #else
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_lo)));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 2)));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_hi)));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 2)));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 1))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 3))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 1))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 3))), 1);
+    #endif
+    const __m128i vl0_lo = _mm_unpacklo_epi64(vl0_ll, vl0_lh);
+    const __m128i vl0_hi = _mm_unpacklo_epi64(vl0_hl, vl0_hh);
+    const __m256 vidx1 = _mm256_and_ps(vn1, vindex_mask);
+
+    const __m128i vidx1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx1)), 2);
+    const __m128i vidx1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx1, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx1_ll = (uint64_t) _mm_cvtsi128_si64(vidx1_lo);
+      const uint64_t vidx1_lh = (uint64_t) _mm_extract_epi64(vidx1_lo, 1);
+      const uint64_t vidx1_hl = (uint64_t) _mm_cvtsi128_si64(vidx1_hi);
+      const uint64_t vidx1_hh = (uint64_t) _mm_extract_epi64(vidx1_hi, 1);
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_ll));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_lh));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hl));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hh));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_ll >> 32))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_lh >> 32))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hl >> 32))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hh >> 32))), 1);
+    #else
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_lo)));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 2)));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_hi)));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 2)));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 1))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 3))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 1))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 3))), 1);
+    #endif
+    const __m128i vl1_lo = _mm_unpacklo_epi64(vl1_ll, vl1_lh);
+    const __m128i vl1_hi = _mm_unpacklo_epi64(vl1_hl, vl1_hh);
+
+    const __m128i ven0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 19);
+    const __m128i ven0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_add_epi32(vl0_lo, ven0_lo));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_add_epi32(vl0_hi, ven0_hi));
+    const __m128i ven1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 19);
+    const __m128i ven1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_add_epi32(vl1_lo, ven1_lo));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_add_epi32(vl1_hi, ven1_hi));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
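
The avx-rr2-lut16-p3 variants above and below (x16/x24/x32) share one exp approximation; reading the generated intrinsics back into math (a reconstruction from the constants and the 16-entry xnn_table_exp2minus_k_over_16 table, not text from the template):

\[
  n = \tfrac{1}{16}\,\mathrm{round}(16\,z\log_2 e), \qquad
  s = 2^{n}, \qquad
  t = z - n\ln 2,
\]
\[
  e^{z} = s\,e^{t} \approx s\,\bigl(1 + t + c_2 t^{2} + c_3 t^{3}\bigr), \qquad
  \alpha\,(e^{z} - 1) \approx \alpha\,\bigl[(s - 1) + s\,t\,\bigl(1 + t\,(c_2 + c_3 t)\bigr)\bigr],
\]

with c_2 = 0x1.0001ECp-1f (about 1/2) and c_3 = 0x1.55561Cp-3f (about 1/6). The magic bias 0x1.800000p19f performs the rounding of 16*z*log2(e) in the mantissa: the low 4 mantissa bits select a table entry of the form 2^(-k/16), the remaining bits are shifted into the exponent field (the `<< 19` / add-epi32 sequence), and together they reconstruct s = 2^n. ln 2 is split into vminus_ln2_hi/vminus_ln2_lo, the second reduction step behind the "rr2" name. The final _mm256_blendv_ps keys off the sign bit of vx (by then beta*x), routing negative inputs to this exponential branch and non-negative ones to beta*x.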
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x24.c b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x24.c
new file mode 100644
index 0000000..1698f27
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x24.c
@@ -0,0 +1,351 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0xF));
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    x += 24;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+
+    const __m256 vidx0 = _mm256_and_ps(vn0, vindex_mask);
+
+    const __m128i vidx0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx0)), 2);
+    const __m128i vidx0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx0, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx0_ll = (uint64_t) _mm_cvtsi128_si64(vidx0_lo);
+      const uint64_t vidx0_lh = (uint64_t) _mm_extract_epi64(vidx0_lo, 1);
+      const uint64_t vidx0_hl = (uint64_t) _mm_cvtsi128_si64(vidx0_hi);
+      const uint64_t vidx0_hh = (uint64_t) _mm_extract_epi64(vidx0_hi, 1);
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_ll));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_lh));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hl));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hh));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_ll >> 32))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_lh >> 32))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hl >> 32))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hh >> 32))), 1);
+    #else
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_lo)));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 2)));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_hi)));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 2)));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 1))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 3))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 1))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 3))), 1);
+    #endif
+    const __m128i vl0_lo = _mm_unpacklo_epi64(vl0_ll, vl0_lh);
+    const __m128i vl0_hi = _mm_unpacklo_epi64(vl0_hl, vl0_hh);
+    const __m256 vidx1 = _mm256_and_ps(vn1, vindex_mask);
+
+    const __m128i vidx1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx1)), 2);
+    const __m128i vidx1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx1, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx1_ll = (uint64_t) _mm_cvtsi128_si64(vidx1_lo);
+      const uint64_t vidx1_lh = (uint64_t) _mm_extract_epi64(vidx1_lo, 1);
+      const uint64_t vidx1_hl = (uint64_t) _mm_cvtsi128_si64(vidx1_hi);
+      const uint64_t vidx1_hh = (uint64_t) _mm_extract_epi64(vidx1_hi, 1);
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_ll));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_lh));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hl));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hh));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_ll >> 32))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_lh >> 32))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hl >> 32))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hh >> 32))), 1);
+    #else
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_lo)));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 2)));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_hi)));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 2)));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 1))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 3))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 1))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 3))), 1);
+    #endif
+    const __m128i vl1_lo = _mm_unpacklo_epi64(vl1_ll, vl1_lh);
+    const __m128i vl1_hi = _mm_unpacklo_epi64(vl1_hl, vl1_hh);
+    const __m256 vidx2 = _mm256_and_ps(vn2, vindex_mask);
+
+    const __m128i vidx2_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx2)), 2);
+    const __m128i vidx2_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx2, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx2_ll = (uint64_t) _mm_cvtsi128_si64(vidx2_lo);
+      const uint64_t vidx2_lh = (uint64_t) _mm_extract_epi64(vidx2_lo, 1);
+      const uint64_t vidx2_hl = (uint64_t) _mm_cvtsi128_si64(vidx2_hi);
+      const uint64_t vidx2_hh = (uint64_t) _mm_extract_epi64(vidx2_hi, 1);
+      __m128i vl2_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_ll));
+      __m128i vl2_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_lh));
+      __m128i vl2_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_hl));
+      __m128i vl2_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_hh));
+      vl2_ll = _mm_insert_epi32(vl2_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_ll >> 32))), 1);
+      vl2_lh = _mm_insert_epi32(vl2_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_lh >> 32))), 1);
+      vl2_hl = _mm_insert_epi32(vl2_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_hl >> 32))), 1);
+      vl2_hh = _mm_insert_epi32(vl2_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_hh >> 32))), 1);
+    #else
+      __m128i vl2_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx2_lo)));
+      __m128i vl2_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 2)));
+      __m128i vl2_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx2_hi)));
+      __m128i vl2_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 2)));
+      vl2_ll = _mm_insert_epi32(vl2_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 1))), 1);
+      vl2_lh = _mm_insert_epi32(vl2_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 3))), 1);
+      vl2_hl = _mm_insert_epi32(vl2_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 1))), 1);
+      vl2_hh = _mm_insert_epi32(vl2_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 3))), 1);
+    #endif
+    const __m128i vl2_lo = _mm_unpacklo_epi64(vl2_ll, vl2_lh);
+    const __m128i vl2_hi = _mm_unpacklo_epi64(vl2_hl, vl2_hh);
+
+    const __m128i ven0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 19);
+    const __m128i ven0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_add_epi32(vl0_lo, ven0_lo));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_add_epi32(vl0_hi, ven0_hi));
+    const __m128i ven1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 19);
+    const __m128i ven1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_add_epi32(vl1_lo, ven1_lo));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_add_epi32(vl1_hi, ven1_hi));
+    const __m128i ven2_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 19);
+    const __m128i ven2_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 vs2_lo = _mm_castsi128_ps(_mm_add_epi32(vl2_lo, ven2_lo));
+    const __m128 vs2_hi = _mm_castsi128_ps(_mm_add_epi32(vl2_hi, ven2_hi));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    y += 24;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x32.c b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x32.c
new file mode 100644
index 0000000..b046f7d
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x32.c
@@ -0,0 +1,400 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0xF));
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    x += 32;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+
+    const __m256 vidx0 = _mm256_and_ps(vn0, vindex_mask);
+
+    const __m128i vidx0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx0)), 2);
+    const __m128i vidx0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx0, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx0_ll = (uint64_t) _mm_cvtsi128_si64(vidx0_lo);
+      const uint64_t vidx0_lh = (uint64_t) _mm_extract_epi64(vidx0_lo, 1);
+      const uint64_t vidx0_hl = (uint64_t) _mm_cvtsi128_si64(vidx0_hi);
+      const uint64_t vidx0_hh = (uint64_t) _mm_extract_epi64(vidx0_hi, 1);
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_ll));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_lh));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hl));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hh));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_ll >> 32))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_lh >> 32))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hl >> 32))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hh >> 32))), 1);
+    #else
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_lo)));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 2)));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_hi)));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 2)));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 1))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 3))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 1))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 3))), 1);
+    #endif
+    const __m128i vl0_lo = _mm_unpacklo_epi64(vl0_ll, vl0_lh);
+    const __m128i vl0_hi = _mm_unpacklo_epi64(vl0_hl, vl0_hh);
+    const __m256 vidx1 = _mm256_and_ps(vn1, vindex_mask);
+
+    const __m128i vidx1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx1)), 2);
+    const __m128i vidx1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx1, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx1_ll = (uint64_t) _mm_cvtsi128_si64(vidx1_lo);
+      const uint64_t vidx1_lh = (uint64_t) _mm_extract_epi64(vidx1_lo, 1);
+      const uint64_t vidx1_hl = (uint64_t) _mm_cvtsi128_si64(vidx1_hi);
+      const uint64_t vidx1_hh = (uint64_t) _mm_extract_epi64(vidx1_hi, 1);
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_ll));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_lh));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hl));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hh));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_ll >> 32))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_lh >> 32))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hl >> 32))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hh >> 32))), 1);
+    #else
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_lo)));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 2)));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_hi)));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 2)));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 1))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 3))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 1))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 3))), 1);
+    #endif
+    const __m128i vl1_lo = _mm_unpacklo_epi64(vl1_ll, vl1_lh);
+    const __m128i vl1_hi = _mm_unpacklo_epi64(vl1_hl, vl1_hh);
+    const __m256 vidx2 = _mm256_and_ps(vn2, vindex_mask);
+
+    const __m128i vidx2_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx2)), 2);
+    const __m128i vidx2_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx2, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx2_ll = (uint64_t) _mm_cvtsi128_si64(vidx2_lo);
+      const uint64_t vidx2_lh = (uint64_t) _mm_extract_epi64(vidx2_lo, 1);
+      const uint64_t vidx2_hl = (uint64_t) _mm_cvtsi128_si64(vidx2_hi);
+      const uint64_t vidx2_hh = (uint64_t) _mm_extract_epi64(vidx2_hi, 1);
+      __m128i vl2_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_ll));
+      __m128i vl2_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_lh));
+      __m128i vl2_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_hl));
+      __m128i vl2_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_hh));
+      vl2_ll = _mm_insert_epi32(vl2_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_ll >> 32))), 1);
+      vl2_lh = _mm_insert_epi32(vl2_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_lh >> 32))), 1);
+      vl2_hl = _mm_insert_epi32(vl2_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_hl >> 32))), 1);
+      vl2_hh = _mm_insert_epi32(vl2_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_hh >> 32))), 1);
+    #else
+      __m128i vl2_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx2_lo)));
+      __m128i vl2_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 2)));
+      __m128i vl2_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx2_hi)));
+      __m128i vl2_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 2)));
+      vl2_ll = _mm_insert_epi32(vl2_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 1))), 1);
+      vl2_lh = _mm_insert_epi32(vl2_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 3))), 1);
+      vl2_hl = _mm_insert_epi32(vl2_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 1))), 1);
+      vl2_hh = _mm_insert_epi32(vl2_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 3))), 1);
+    #endif
+    const __m128i vl2_lo = _mm_unpacklo_epi64(vl2_ll, vl2_lh);
+    const __m128i vl2_hi = _mm_unpacklo_epi64(vl2_hl, vl2_hh);
+    const __m256 vidx3 = _mm256_and_ps(vn3, vindex_mask);
+
+    const __m128i vidx3_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx3)), 2);
+    const __m128i vidx3_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx3, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx3_ll = (uint64_t) _mm_cvtsi128_si64(vidx3_lo);
+      const uint64_t vidx3_lh = (uint64_t) _mm_extract_epi64(vidx3_lo, 1);
+      const uint64_t vidx3_hl = (uint64_t) _mm_cvtsi128_si64(vidx3_hi);
+      const uint64_t vidx3_hh = (uint64_t) _mm_extract_epi64(vidx3_hi, 1);
+      __m128i vl3_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_ll));
+      __m128i vl3_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_lh));
+      __m128i vl3_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_hl));
+      __m128i vl3_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_hh));
+      vl3_ll = _mm_insert_epi32(vl3_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_ll >> 32))), 1);
+      vl3_lh = _mm_insert_epi32(vl3_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_lh >> 32))), 1);
+      vl3_hl = _mm_insert_epi32(vl3_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_hl >> 32))), 1);
+      vl3_hh = _mm_insert_epi32(vl3_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_hh >> 32))), 1);
+    #else
+      __m128i vl3_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx3_lo)));
+      __m128i vl3_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 2)));
+      __m128i vl3_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx3_hi)));
+      __m128i vl3_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 2)));
+      vl3_ll = _mm_insert_epi32(vl3_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 1))), 1);
+      vl3_lh = _mm_insert_epi32(vl3_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 3))), 1);
+      vl3_hl = _mm_insert_epi32(vl3_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 1))), 1);
+      vl3_hh = _mm_insert_epi32(vl3_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 3))), 1);
+    #endif
+    const __m128i vl3_lo = _mm_unpacklo_epi64(vl3_ll, vl3_lh);
+    const __m128i vl3_hi = _mm_unpacklo_epi64(vl3_hl, vl3_hh);
+
+    const __m128i ven0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 19);
+    const __m128i ven0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_add_epi32(vl0_lo, ven0_lo));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_add_epi32(vl0_hi, ven0_hi));
+    const __m128i ven1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 19);
+    const __m128i ven1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_add_epi32(vl1_lo, ven1_lo));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_add_epi32(vl1_hi, ven1_hi));
+    const __m128i ven2_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 19);
+    const __m128i ven2_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 vs2_lo = _mm_castsi128_ps(_mm_add_epi32(vl2_lo, ven2_lo));
+    const __m128 vs2_hi = _mm_castsi128_ps(_mm_add_epi32(vl2_hi, ven2_hi));
+    const __m128i ven3_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 19);
+    const __m128i ven3_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m128 vs3_lo = _mm_castsi128_ps(_mm_add_epi32(vl3_lo, ven3_lo));
+    const __m128 vs3_hi = _mm_castsi128_ps(_mm_add_epi32(vl3_hi, ven3_hi));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+    __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    y += 32;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
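
Editorial aside (not part of the generated sources above): the following minimal scalar sketch shows what each element of the kernel computes. elu_scalar_ref is an illustrative name, and it leans on libm's expm1f instead of the kernel's rr2/lut16/p3 approximation.

#include <math.h>

// Hypothetical scalar reference for the f32 ELU kernels in this diff:
//   y = beta * x                          for x >= 0
//   y = alpha * (exp(prescale * x) - 1)   for x <  0
// The AVX kernel reaches the same result by clamping z = prescale*x at the
// -0x1.154246p+4f saturation cutoff, range-reducing z*log2(e) with the
// 0x1.800000p19f magic bias, looking up 2**(-k/16) in
// xnn_table_exp2minus_k_over_16, refining with a degree-3 polynomial
// (vc3, vc2), and sign-selecting via _mm256_blendv_ps.
static float elu_scalar_ref(float x, float prescale, float alpha, float beta)
{
  return signbit(x) ? alpha * expm1f(prescale * x) : beta * x;
}
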
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x40.c b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x40.c
new file mode 100644
index 0000000..1282d19
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x40.c
@@ -0,0 +1,449 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0xF));
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 40 * sizeof(float); n -= 40 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    x += 40;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+    __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
+
+    const __m256 vidx0 = _mm256_and_ps(vn0, vindex_mask);
+
+    const __m128i vidx0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx0)), 2);
+    const __m128i vidx0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx0, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx0_ll = (uint64_t) _mm_cvtsi128_si64(vidx0_lo);
+      const uint64_t vidx0_lh = (uint64_t) _mm_extract_epi64(vidx0_lo, 1);
+      const uint64_t vidx0_hl = (uint64_t) _mm_cvtsi128_si64(vidx0_hi);
+      const uint64_t vidx0_hh = (uint64_t) _mm_extract_epi64(vidx0_hi, 1);
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_ll));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_lh));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hl));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hh));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_ll >> 32))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_lh >> 32))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hl >> 32))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hh >> 32))), 1);
+    #else
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_lo)));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 2)));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_hi)));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 2)));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 1))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 3))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 1))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 3))), 1);
+    #endif
+    const __m128i vl0_lo = _mm_unpacklo_epi64(vl0_ll, vl0_lh);
+    const __m128i vl0_hi = _mm_unpacklo_epi64(vl0_hl, vl0_hh);
+    const __m256 vidx1 = _mm256_and_ps(vn1, vindex_mask);
+
+    const __m128i vidx1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx1)), 2);
+    const __m128i vidx1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx1, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx1_ll = (uint64_t) _mm_cvtsi128_si64(vidx1_lo);
+      const uint64_t vidx1_lh = (uint64_t) _mm_extract_epi64(vidx1_lo, 1);
+      const uint64_t vidx1_hl = (uint64_t) _mm_cvtsi128_si64(vidx1_hi);
+      const uint64_t vidx1_hh = (uint64_t) _mm_extract_epi64(vidx1_hi, 1);
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_ll));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_lh));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hl));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hh));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_ll >> 32))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_lh >> 32))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hl >> 32))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hh >> 32))), 1);
+    #else
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_lo)));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 2)));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_hi)));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 2)));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 1))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 3))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 1))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 3))), 1);
+    #endif
+    const __m128i vl1_lo = _mm_unpacklo_epi64(vl1_ll, vl1_lh);
+    const __m128i vl1_hi = _mm_unpacklo_epi64(vl1_hl, vl1_hh);
+    const __m256 vidx2 = _mm256_and_ps(vn2, vindex_mask);
+
+    const __m128i vidx2_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx2)), 2);
+    const __m128i vidx2_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx2, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx2_ll = (uint64_t) _mm_cvtsi128_si64(vidx2_lo);
+      const uint64_t vidx2_lh = (uint64_t) _mm_extract_epi64(vidx2_lo, 1);
+      const uint64_t vidx2_hl = (uint64_t) _mm_cvtsi128_si64(vidx2_hi);
+      const uint64_t vidx2_hh = (uint64_t) _mm_extract_epi64(vidx2_hi, 1);
+      __m128i vl2_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_ll));
+      __m128i vl2_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_lh));
+      __m128i vl2_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_hl));
+      __m128i vl2_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_hh));
+      vl2_ll = _mm_insert_epi32(vl2_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_ll >> 32))), 1);
+      vl2_lh = _mm_insert_epi32(vl2_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_lh >> 32))), 1);
+      vl2_hl = _mm_insert_epi32(vl2_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_hl >> 32))), 1);
+      vl2_hh = _mm_insert_epi32(vl2_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_hh >> 32))), 1);
+    #else
+      __m128i vl2_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx2_lo)));
+      __m128i vl2_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 2)));
+      __m128i vl2_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx2_hi)));
+      __m128i vl2_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 2)));
+      vl2_ll = _mm_insert_epi32(vl2_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 1))), 1);
+      vl2_lh = _mm_insert_epi32(vl2_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 3))), 1);
+      vl2_hl = _mm_insert_epi32(vl2_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 1))), 1);
+      vl2_hh = _mm_insert_epi32(vl2_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 3))), 1);
+    #endif
+    const __m128i vl2_lo = _mm_unpacklo_epi64(vl2_ll, vl2_lh);
+    const __m128i vl2_hi = _mm_unpacklo_epi64(vl2_hl, vl2_hh);
+    const __m256 vidx3 = _mm256_and_ps(vn3, vindex_mask);
+
+    const __m128i vidx3_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx3)), 2);
+    const __m128i vidx3_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx3, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx3_ll = (uint64_t) _mm_cvtsi128_si64(vidx3_lo);
+      const uint64_t vidx3_lh = (uint64_t) _mm_extract_epi64(vidx3_lo, 1);
+      const uint64_t vidx3_hl = (uint64_t) _mm_cvtsi128_si64(vidx3_hi);
+      const uint64_t vidx3_hh = (uint64_t) _mm_extract_epi64(vidx3_hi, 1);
+      __m128i vl3_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_ll));
+      __m128i vl3_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_lh));
+      __m128i vl3_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_hl));
+      __m128i vl3_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_hh));
+      vl3_ll = _mm_insert_epi32(vl3_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_ll >> 32))), 1);
+      vl3_lh = _mm_insert_epi32(vl3_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_lh >> 32))), 1);
+      vl3_hl = _mm_insert_epi32(vl3_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_hl >> 32))), 1);
+      vl3_hh = _mm_insert_epi32(vl3_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_hh >> 32))), 1);
+    #else
+      __m128i vl3_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx3_lo)));
+      __m128i vl3_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 2)));
+      __m128i vl3_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx3_hi)));
+      __m128i vl3_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 2)));
+      vl3_ll = _mm_insert_epi32(vl3_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 1))), 1);
+      vl3_lh = _mm_insert_epi32(vl3_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 3))), 1);
+      vl3_hl = _mm_insert_epi32(vl3_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 1))), 1);
+      vl3_hh = _mm_insert_epi32(vl3_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 3))), 1);
+    #endif
+    const __m128i vl3_lo = _mm_unpacklo_epi64(vl3_ll, vl3_lh);
+    const __m128i vl3_hi = _mm_unpacklo_epi64(vl3_hl, vl3_hh);
+    const __m256 vidx4 = _mm256_and_ps(vn4, vindex_mask);
+
+    const __m128i vidx4_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx4)), 2);
+    const __m128i vidx4_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx4, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx4_ll = (uint64_t) _mm_cvtsi128_si64(vidx4_lo);
+      const uint64_t vidx4_lh = (uint64_t) _mm_extract_epi64(vidx4_lo, 1);
+      const uint64_t vidx4_hl = (uint64_t) _mm_cvtsi128_si64(vidx4_hi);
+      const uint64_t vidx4_hh = (uint64_t) _mm_extract_epi64(vidx4_hi, 1);
+      __m128i vl4_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4_ll));
+      __m128i vl4_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4_lh));
+      __m128i vl4_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4_hl));
+      __m128i vl4_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4_hh));
+      vl4_ll = _mm_insert_epi32(vl4_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx4_ll >> 32))), 1);
+      vl4_lh = _mm_insert_epi32(vl4_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx4_lh >> 32))), 1);
+      vl4_hl = _mm_insert_epi32(vl4_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx4_hl >> 32))), 1);
+      vl4_hh = _mm_insert_epi32(vl4_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx4_hh >> 32))), 1);
+    #else
+      __m128i vl4_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx4_lo)));
+      __m128i vl4_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_lo, 2)));
+      __m128i vl4_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx4_hi)));
+      __m128i vl4_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_hi, 2)));
+      vl4_ll = _mm_insert_epi32(vl4_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_lo, 1))), 1);
+      vl4_lh = _mm_insert_epi32(vl4_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_lo, 3))), 1);
+      vl4_hl = _mm_insert_epi32(vl4_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_hi, 1))), 1);
+      vl4_hh = _mm_insert_epi32(vl4_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_hi, 3))), 1);
+    #endif
+    const __m128i vl4_lo = _mm_unpacklo_epi64(vl4_ll, vl4_lh);
+    const __m128i vl4_hi = _mm_unpacklo_epi64(vl4_hl, vl4_hh);
+
+    const __m128i ven0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 19);
+    const __m128i ven0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_add_epi32(vl0_lo, ven0_lo));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_add_epi32(vl0_hi, ven0_hi));
+    const __m128i ven1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 19);
+    const __m128i ven1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_add_epi32(vl1_lo, ven1_lo));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_add_epi32(vl1_hi, ven1_hi));
+    const __m128i ven2_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 19);
+    const __m128i ven2_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 vs2_lo = _mm_castsi128_ps(_mm_add_epi32(vl2_lo, ven2_lo));
+    const __m128 vs2_hi = _mm_castsi128_ps(_mm_add_epi32(vl2_hi, ven2_hi));
+    const __m128i ven3_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 19);
+    const __m128i ven3_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m128 vs3_lo = _mm_castsi128_ps(_mm_add_epi32(vl3_lo, ven3_lo));
+    const __m128 vs3_hi = _mm_castsi128_ps(_mm_add_epi32(vl3_hi, ven3_hi));
+    const __m128i ven4_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 19);
+    const __m128i ven4_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 19);
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m128 vs4_lo = _mm_castsi128_ps(_mm_add_epi32(vl4_lo, ven4_lo));
+    const __m128 vs4_hi = _mm_castsi128_ps(_mm_add_epi32(vl4_hi, ven4_hi));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+    __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+    __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
+    vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
+    __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs4_lo), vs4_hi, 1);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2);
+    __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vs4 = _mm256_sub_ps(vs4, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    y += 40;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
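
Editorial aside on the remainder handling used by these kernels: the 14-entry mask_table (seven -1 words followed by seven 0 words) becomes an 8-lane AVX mask by loading 32 bytes starting n bytes before &mask_table[7], so exactly n/4 leading lanes come out all-ones. The standalone demo below reproduces that arithmetic (compile with -mavx); demo_remainder_mask and main are illustrative names, not part of XNNPACK.

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

// Prints which of the 8 AVX lanes are active for a tail of n bytes (1..7 floats).
static void demo_remainder_mask(size_t n)
{
  const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
  int32_t lanes[8];
  _mm256_storeu_si256((__m256i*) lanes, vmask);
  printf("n=%2zu bytes:", n);
  for (int i = 0; i < 8; i++) {
    printf(" %d", lanes[i] != 0);  // 1 = lane loaded/stored, 0 = masked off
  }
  printf("\n");
}

int main(void)
{
  for (size_t elements = 1; elements <= 7; elements++) {
    demo_remainder_mask(elements * sizeof(float));
  }
  return 0;
}
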
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x48.c b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x48.c
new file mode 100644
index 0000000..8795f25
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x48.c
@@ -0,0 +1,498 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0xF));
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    x += 48;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+    __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
+    __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
+
+    const __m256 vidx0 = _mm256_and_ps(vn0, vindex_mask);
+
+    const __m128i vidx0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx0)), 2);
+    const __m128i vidx0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx0, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx0_ll = (uint64_t) _mm_cvtsi128_si64(vidx0_lo);
+      const uint64_t vidx0_lh = (uint64_t) _mm_extract_epi64(vidx0_lo, 1);
+      const uint64_t vidx0_hl = (uint64_t) _mm_cvtsi128_si64(vidx0_hi);
+      const uint64_t vidx0_hh = (uint64_t) _mm_extract_epi64(vidx0_hi, 1);
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_ll));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_lh));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hl));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0_hh));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_ll >> 32))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_lh >> 32))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hl >> 32))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx0_hh >> 32))), 1);
+    #else
+      __m128i vl0_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_lo)));
+      __m128i vl0_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 2)));
+      __m128i vl0_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx0_hi)));
+      __m128i vl0_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 2)));
+      vl0_ll = _mm_insert_epi32(vl0_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 1))), 1);
+      vl0_lh = _mm_insert_epi32(vl0_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_lo, 3))), 1);
+      vl0_hl = _mm_insert_epi32(vl0_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 1))), 1);
+      vl0_hh = _mm_insert_epi32(vl0_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx0_hi, 3))), 1);
+    #endif
+    const __m128i vl0_lo = _mm_unpacklo_epi64(vl0_ll, vl0_lh);
+    const __m128i vl0_hi = _mm_unpacklo_epi64(vl0_hl, vl0_hh);
+    const __m256 vidx1 = _mm256_and_ps(vn1, vindex_mask);
+
+    const __m128i vidx1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx1)), 2);
+    const __m128i vidx1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx1, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx1_ll = (uint64_t) _mm_cvtsi128_si64(vidx1_lo);
+      const uint64_t vidx1_lh = (uint64_t) _mm_extract_epi64(vidx1_lo, 1);
+      const uint64_t vidx1_hl = (uint64_t) _mm_cvtsi128_si64(vidx1_hi);
+      const uint64_t vidx1_hh = (uint64_t) _mm_extract_epi64(vidx1_hi, 1);
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_ll));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_lh));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hl));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1_hh));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_ll >> 32))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_lh >> 32))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hl >> 32))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx1_hh >> 32))), 1);
+    #else
+      __m128i vl1_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_lo)));
+      __m128i vl1_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 2)));
+      __m128i vl1_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx1_hi)));
+      __m128i vl1_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 2)));
+      vl1_ll = _mm_insert_epi32(vl1_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 1))), 1);
+      vl1_lh = _mm_insert_epi32(vl1_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_lo, 3))), 1);
+      vl1_hl = _mm_insert_epi32(vl1_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 1))), 1);
+      vl1_hh = _mm_insert_epi32(vl1_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx1_hi, 3))), 1);
+    #endif
+    const __m128i vl1_lo = _mm_unpacklo_epi64(vl1_ll, vl1_lh);
+    const __m128i vl1_hi = _mm_unpacklo_epi64(vl1_hl, vl1_hh);
+    const __m256 vidx2 = _mm256_and_ps(vn2, vindex_mask);
+
+    const __m128i vidx2_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx2)), 2);
+    const __m128i vidx2_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx2, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx2_ll = (uint64_t) _mm_cvtsi128_si64(vidx2_lo);
+      const uint64_t vidx2_lh = (uint64_t) _mm_extract_epi64(vidx2_lo, 1);
+      const uint64_t vidx2_hl = (uint64_t) _mm_cvtsi128_si64(vidx2_hi);
+      const uint64_t vidx2_hh = (uint64_t) _mm_extract_epi64(vidx2_hi, 1);
+      __m128i vl2_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_ll));
+      __m128i vl2_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_lh));
+      __m128i vl2_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_hl));
+      __m128i vl2_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2_hh));
+      vl2_ll = _mm_insert_epi32(vl2_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_ll >> 32))), 1);
+      vl2_lh = _mm_insert_epi32(vl2_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_lh >> 32))), 1);
+      vl2_hl = _mm_insert_epi32(vl2_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_hl >> 32))), 1);
+      vl2_hh = _mm_insert_epi32(vl2_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx2_hh >> 32))), 1);
+    #else
+      __m128i vl2_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx2_lo)));
+      __m128i vl2_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 2)));
+      __m128i vl2_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx2_hi)));
+      __m128i vl2_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 2)));
+      vl2_ll = _mm_insert_epi32(vl2_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 1))), 1);
+      vl2_lh = _mm_insert_epi32(vl2_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_lo, 3))), 1);
+      vl2_hl = _mm_insert_epi32(vl2_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 1))), 1);
+      vl2_hh = _mm_insert_epi32(vl2_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx2_hi, 3))), 1);
+    #endif
+    const __m128i vl2_lo = _mm_unpacklo_epi64(vl2_ll, vl2_lh);
+    const __m128i vl2_hi = _mm_unpacklo_epi64(vl2_hl, vl2_hh);
+    const __m256 vidx3 = _mm256_and_ps(vn3, vindex_mask);
+
+    const __m128i vidx3_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx3)), 2);
+    const __m128i vidx3_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx3, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx3_ll = (uint64_t) _mm_cvtsi128_si64(vidx3_lo);
+      const uint64_t vidx3_lh = (uint64_t) _mm_extract_epi64(vidx3_lo, 1);
+      const uint64_t vidx3_hl = (uint64_t) _mm_cvtsi128_si64(vidx3_hi);
+      const uint64_t vidx3_hh = (uint64_t) _mm_extract_epi64(vidx3_hi, 1);
+      __m128i vl3_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_ll));
+      __m128i vl3_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_lh));
+      __m128i vl3_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_hl));
+      __m128i vl3_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3_hh));
+      vl3_ll = _mm_insert_epi32(vl3_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_ll >> 32))), 1);
+      vl3_lh = _mm_insert_epi32(vl3_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_lh >> 32))), 1);
+      vl3_hl = _mm_insert_epi32(vl3_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_hl >> 32))), 1);
+      vl3_hh = _mm_insert_epi32(vl3_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx3_hh >> 32))), 1);
+    #else
+      __m128i vl3_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx3_lo)));
+      __m128i vl3_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 2)));
+      __m128i vl3_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx3_hi)));
+      __m128i vl3_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 2)));
+      vl3_ll = _mm_insert_epi32(vl3_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 1))), 1);
+      vl3_lh = _mm_insert_epi32(vl3_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_lo, 3))), 1);
+      vl3_hl = _mm_insert_epi32(vl3_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 1))), 1);
+      vl3_hh = _mm_insert_epi32(vl3_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx3_hi, 3))), 1);
+    #endif
+    const __m128i vl3_lo = _mm_unpacklo_epi64(vl3_ll, vl3_lh);
+    const __m128i vl3_hi = _mm_unpacklo_epi64(vl3_hl, vl3_hh);
+    const __m256 vidx4 = _mm256_and_ps(vn4, vindex_mask);
+
+    const __m128i vidx4_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx4)), 2);
+    const __m128i vidx4_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx4, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx4_ll = (uint64_t) _mm_cvtsi128_si64(vidx4_lo);
+      const uint64_t vidx4_lh = (uint64_t) _mm_extract_epi64(vidx4_lo, 1);
+      const uint64_t vidx4_hl = (uint64_t) _mm_cvtsi128_si64(vidx4_hi);
+      const uint64_t vidx4_hh = (uint64_t) _mm_extract_epi64(vidx4_hi, 1);
+      __m128i vl4_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4_ll));
+      __m128i vl4_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4_lh));
+      __m128i vl4_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4_hl));
+      __m128i vl4_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4_hh));
+      vl4_ll = _mm_insert_epi32(vl4_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx4_ll >> 32))), 1);
+      vl4_lh = _mm_insert_epi32(vl4_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx4_lh >> 32))), 1);
+      vl4_hl = _mm_insert_epi32(vl4_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx4_hl >> 32))), 1);
+      vl4_hh = _mm_insert_epi32(vl4_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx4_hh >> 32))), 1);
+    #else
+      __m128i vl4_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx4_lo)));
+      __m128i vl4_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_lo, 2)));
+      __m128i vl4_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx4_hi)));
+      __m128i vl4_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_hi, 2)));
+      vl4_ll = _mm_insert_epi32(vl4_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_lo, 1))), 1);
+      vl4_lh = _mm_insert_epi32(vl4_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_lo, 3))), 1);
+      vl4_hl = _mm_insert_epi32(vl4_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_hi, 1))), 1);
+      vl4_hh = _mm_insert_epi32(vl4_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx4_hi, 3))), 1);
+    #endif
+    const __m128i vl4_lo = _mm_unpacklo_epi64(vl4_ll, vl4_lh);
+    const __m128i vl4_hi = _mm_unpacklo_epi64(vl4_hl, vl4_hh);
+    const __m256 vidx5 = _mm256_and_ps(vn5, vindex_mask);
+
+    const __m128i vidx5_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx5)), 2);
+    const __m128i vidx5_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx5, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx5_ll = (uint64_t) _mm_cvtsi128_si64(vidx5_lo);
+      const uint64_t vidx5_lh = (uint64_t) _mm_extract_epi64(vidx5_lo, 1);
+      const uint64_t vidx5_hl = (uint64_t) _mm_cvtsi128_si64(vidx5_hi);
+      const uint64_t vidx5_hh = (uint64_t) _mm_extract_epi64(vidx5_hi, 1);
+      __m128i vl5_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5_ll));
+      __m128i vl5_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5_lh));
+      __m128i vl5_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5_hl));
+      __m128i vl5_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5_hh));
+      vl5_ll = _mm_insert_epi32(vl5_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx5_ll >> 32))), 1);
+      vl5_lh = _mm_insert_epi32(vl5_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx5_lh >> 32))), 1);
+      vl5_hl = _mm_insert_epi32(vl5_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx5_hl >> 32))), 1);
+      vl5_hh = _mm_insert_epi32(vl5_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx5_hh >> 32))), 1);
+    #else
+      __m128i vl5_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx5_lo)));
+      __m128i vl5_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx5_lo, 2)));
+      __m128i vl5_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx5_hi)));
+      __m128i vl5_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx5_hi, 2)));
+      vl5_ll = _mm_insert_epi32(vl5_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx5_lo, 1))), 1);
+      vl5_lh = _mm_insert_epi32(vl5_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx5_lo, 3))), 1);
+      vl5_hl = _mm_insert_epi32(vl5_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx5_hi, 1))), 1);
+      vl5_hh = _mm_insert_epi32(vl5_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx5_hi, 3))), 1);
+    #endif
+    const __m128i vl5_lo = _mm_unpacklo_epi64(vl5_ll, vl5_lh);
+    const __m128i vl5_hi = _mm_unpacklo_epi64(vl5_hl, vl5_hh);
+
+    const __m128i ven0_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 19);
+    const __m128i ven0_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_add_epi32(vl0_lo, ven0_lo));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_add_epi32(vl0_hi, ven0_hi));
+    const __m128i ven1_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 19);
+    const __m128i ven1_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_add_epi32(vl1_lo, ven1_lo));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_add_epi32(vl1_hi, ven1_hi));
+    const __m128i ven2_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 19);
+    const __m128i ven2_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 vs2_lo = _mm_castsi128_ps(_mm_add_epi32(vl2_lo, ven2_lo));
+    const __m128 vs2_hi = _mm_castsi128_ps(_mm_add_epi32(vl2_hi, ven2_hi));
+    const __m128i ven3_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 19);
+    const __m128i ven3_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m128 vs3_lo = _mm_castsi128_ps(_mm_add_epi32(vl3_lo, ven3_lo));
+    const __m128 vs3_hi = _mm_castsi128_ps(_mm_add_epi32(vl3_hi, ven3_hi));
+    const __m128i ven4_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 19);
+    const __m128i ven4_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 19);
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m128 vs4_lo = _mm_castsi128_ps(_mm_add_epi32(vl4_lo, ven4_lo));
+    const __m128 vs4_hi = _mm_castsi128_ps(_mm_add_epi32(vl4_hi, ven4_hi));
+    const __m128i ven5_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 19);
+    const __m128i ven5_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 19);
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m128 vs5_lo = _mm_castsi128_ps(_mm_add_epi32(vl5_lo, ven5_lo));
+    const __m128 vs5_hi = _mm_castsi128_ps(_mm_add_epi32(vl5_hi, ven5_hi));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+    __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
+    __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+    __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
+    vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
+    __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs4_lo), vs4_hi, 1);
+    vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
+    __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs5_lo), vs5_hi, 1);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2);
+    __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2);
+    __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vs4 = _mm256_sub_ps(vs4, vone);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vs5 = _mm256_sub_ps(vs5, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
+    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vt5);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_mul_ps(_mm256_add_ps(vp5, vs5), valpha);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    y += 48;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x8.c b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x8.c
new file mode 100644
index 0000000..f27a1aa
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut16-p3-x8.c
@@ -0,0 +1,187 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0xF));
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m256 vidx = _mm256_and_ps(vn, vindex_mask);
+
+    const __m128i vidx_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vidx)), 2);
+    const __m128i vidx_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vidx, 1)), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_ll = (uint64_t) _mm_cvtsi128_si64(vidx_lo);
+      const uint64_t vidx_lh = (uint64_t) _mm_extract_epi64(vidx_lo, 1);
+      const uint64_t vidx_hl = (uint64_t) _mm_cvtsi128_si64(vidx_hi);
+      const uint64_t vidx_hh = (uint64_t) _mm_extract_epi64(vidx_hi, 1);
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_ll));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lh));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hl));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hh));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_ll >> 32))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lh >> 32))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hl >> 32))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hh >> 32))), 1);
+    #else
+      __m128i vl_ll = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_lo)));
+      __m128i vl_lh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 2)));
+      __m128i vl_hl = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx_hi)));
+      __m128i vl_hh = _mm_loadu_si32((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 2)));
+      vl_ll = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 1))), 1);
+      vl_lh = _mm_insert_epi32(vl_lh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_lo, 3))), 1);
+      vl_hl = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 1))), 1);
+      vl_hh = _mm_insert_epi32(vl_hh, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi32(vidx_hi, 3))), 1);
+    #endif
+    const __m128i ven_lo = _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 19);
+    const __m128i ven_hi = _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 19);
+
+    const __m128i vl_lo = _mm_unpacklo_epi64(vl_ll, vl_lh);
+    const __m128i vl_hi = _mm_unpacklo_epi64(vl_hl, vl_hh);
+
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_add_epi32(vl_lo, ven_lo));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_add_epi32(vl_hi, ven_hi));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x16.c b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x16.c
new file mode 100644
index 0000000..90a2597
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x16.c
@@ -0,0 +1,190 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
+  const __m256 vtable = _mm256_set_ps(
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    x += 16;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+
+    __m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
+    const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
+    const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
+    __m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
+    const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
+    const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
+
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_mul_ps(vl0, ven0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_mul_ps(vl1, ven1);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x24.c b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x24.c
new file mode 100644
index 0000000..f640ad3
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x24.c
@@ -0,0 +1,212 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
+  const __m256 vtable = _mm256_set_ps(
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    x += 24;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+
+    __m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
+    const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
+    const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
+    __m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
+    const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
+    const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
+    __m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);
+    const __m256 vl2 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2));
+    const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
+
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_mul_ps(vl0, ven0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_mul_ps(vl1, ven1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    __m256 vs2 = _mm256_mul_ps(vl2, ven2);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc4, vt2), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    y += 24;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x32.c b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x32.c
new file mode 100644
index 0000000..9b87a6b
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x32.c
@@ -0,0 +1,234 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
+  const __m256 vtable = _mm256_set_ps(
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    x += 32;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+
+    __m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
+    const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
+    const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
+    __m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
+    const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
+    const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
+    __m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);
+    const __m256 vl2 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2));
+    const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
+    __m256 ven3 = _mm256_andnot_ps(vindex_mask, vn3);
+    const __m256 vl3 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3));
+    const __m128 ven3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven3)), 21));
+
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m128 ven3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven3, 1)), 21));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+    ven3 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven3_lo), ven3_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_mul_ps(vl0, ven0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_mul_ps(vl1, ven1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    __m256 vs2 = _mm256_mul_ps(vl2, ven2);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+    __m256 vs3 = _mm256_mul_ps(vl3, ven3);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc4, vt2), vc3);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc4, vt3), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    y += 32;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
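All of the f32-velu kernels added in this change compute the same extended ELU: y = beta * x for non-negative inputs, and y = alpha * (exp(prescale * x) - 1) for negative inputs, with the exponential argument clamped at the saturation cutoff before the exp approximation. A minimal scalar sketch of that contract (illustration only, not part of XNNPACK; elu_ref is a made-up name):

#include <math.h>

// One element of the value the AVX kernels produce 8 lanes at a time; the
// kernels pick between the two branches with _mm256_blendv_ps on the sign
// bit instead of an if.
static float elu_ref(float x, float prescale, float alpha, float beta) {
  if (x >= 0.0f) {
    return beta * x;
  }
  const float z = fmaxf(prescale * x, -0x1.154246p+4f);  // saturation cutoff
  return alpha * expm1f(z);  // exact expm1 stands in for the rr2 approximation
}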
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x40.c b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x40.c
new file mode 100644
index 0000000..e6c3121
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x40.c
@@ -0,0 +1,256 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
+  const __m256 vtable = _mm256_set_ps(
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 40 * sizeof(float); n -= 40 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    x += 40;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+    __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
+
+    __m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
+    const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
+    const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
+    __m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
+    const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
+    const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
+    __m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);
+    const __m256 vl2 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2));
+    const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
+    __m256 ven3 = _mm256_andnot_ps(vindex_mask, vn3);
+    const __m256 vl3 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3));
+    const __m128 ven3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven3)), 21));
+    __m256 ven4 = _mm256_andnot_ps(vindex_mask, vn4);
+    const __m256 vl4 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4));
+    const __m128 ven4_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven4)), 21));
+
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m128 ven3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven3, 1)), 21));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m128 ven4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven4, 1)), 21));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+    ven3 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven3_lo), ven3_hi, 1);
+    __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
+    ven4 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven4_lo), ven4_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_mul_ps(vl0, ven0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_mul_ps(vl1, ven1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    __m256 vs2 = _mm256_mul_ps(vl2, ven2);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+    __m256 vs3 = _mm256_mul_ps(vl3, ven3);
+    vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
+    __m256 vs4 = _mm256_mul_ps(vl4, ven4);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc4, vt2), vc3);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc4, vt3), vc3);
+    __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc4, vt4), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vs4 = _mm256_sub_ps(vs4, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    y += 40;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x48.c b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x48.c
new file mode 100644
index 0000000..4f0f011
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x48.c
@@ -0,0 +1,278 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
+  const __m256 vtable = _mm256_set_ps(
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    x += 48;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+    __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
+    __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
+
+    __m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
+    const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
+    const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
+    __m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
+    const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
+    const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
+    __m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);
+    const __m256 vl2 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2));
+    const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
+    __m256 ven3 = _mm256_andnot_ps(vindex_mask, vn3);
+    const __m256 vl3 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3));
+    const __m128 ven3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven3)), 21));
+    __m256 ven4 = _mm256_andnot_ps(vindex_mask, vn4);
+    const __m256 vl4 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4));
+    const __m128 ven4_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven4)), 21));
+    __m256 ven5 = _mm256_andnot_ps(vindex_mask, vn5);
+    const __m256 vl5 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5));
+    const __m128 ven5_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven5)), 21));
+
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m128 ven3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven3, 1)), 21));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m128 ven4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven4, 1)), 21));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m128 ven5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven5, 1)), 21));
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+    ven3 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven3_lo), ven3_hi, 1);
+    __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
+    ven4 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven4_lo), ven4_hi, 1);
+    __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
+    ven5 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven5_lo), ven5_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    __m256 vs0 = _mm256_mul_ps(vl0, ven0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    __m256 vs1 = _mm256_mul_ps(vl1, ven1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    __m256 vs2 = _mm256_mul_ps(vl2, ven2);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+    __m256 vs3 = _mm256_mul_ps(vl3, ven3);
+    vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
+    __m256 vs4 = _mm256_mul_ps(vl4, ven4);
+    vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
+    __m256 vs5 = _mm256_mul_ps(vl5, ven5);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc4, vt2), vc3);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc4, vt3), vc3);
+    __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc4, vt4), vc3);
+    __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc4, vt5), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
+    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vs4 = _mm256_sub_ps(vs4, vone);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vs5 = _mm256_sub_ps(vs5, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
+    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vt5);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_mul_ps(_mm256_add_ps(vp5, vs5), valpha);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    y += 48;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x8.c b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x8.c
new file mode 100644
index 0000000..7a0f1c8
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-lut4-p4-perm-x8.c
@@ -0,0 +1,128 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8003F8p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vindex_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x3));
+  const __m256 vtable = _mm256_set_ps(
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f,
+    0x1.AE89FAp+0f, 0x1.6A09E6p+0f, 0x1.306FE0p+0f, 0x1.000000p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
+    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
+    const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+    const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+    __m256 vs = _mm256_mul_ps(vl, ven);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
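The lut4-p4-perm variants rebuild the scale vs ~= 2^n in two pieces: the two low bits of the magic-biased vn's raw bit pattern index the 4-entry table of 2^(j/4) through _mm256_permutevar_ps, while the remaining bits, shifted left by 21 (= 23 - 2) into the float exponent field, give 2^floor(n). A scalar sketch of just that reconstruction, assuming it is easier to follow outside the intrinsics (scale_from_biased_n is a made-up name):

#include <stdint.h>
#include <string.h>

// biased_n is z * log2(e) + 0x1.8003F8p21f, exactly as computed in the kernels.
static float scale_from_biased_n(float biased_n) {
  static const float table[4] = {  // 2^(j/4) for j = 0..3, same values as vtable
    0x1.000000p+0f, 0x1.306FE0p+0f, 0x1.6A09E6p+0f, 0x1.AE89FAp+0f,
  };
  uint32_t bits;
  memcpy(&bits, &biased_n, sizeof(bits));
  const float l = table[bits & 0x3];               // fractional part of n
  const uint32_t ebits = (bits & ~UINT32_C(0x3)) << 21;
  float e;
  memcpy(&e, &ebits, sizeof(e));                   // integer part: 2^floor(n)
  return l * e;                                    // the kernels' vs, ~= 2^n
}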
diff --git a/src/f32-velu/gen/velu-avx-rr2-p6-x16.c b/src/f32-velu/gen/velu-avx-rr2-p6-x16.c
new file mode 100644
index 0000000..6de5fff
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-p6-x16.c
@@ -0,0 +1,185 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E440p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.0105C6p-21f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    x += 16;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
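The p6 variants drop the table and pay for it with a longer polynomial: vn is rounded to an integer with the 0x1.8000FEp23f bias, its low bits are shifted straight into the exponent field (shift by 23, done on each 128-bit half because AVX has no 256-bit integer shift), and exp on the reduced argument uses a degree-6 polynomial. A scalar analogue of the expm1 core with the same constants, offered only as an illustration (expm1_p6_ref is a made-up name; z is the already prescaled and saturated input):

#include <stdint.h>
#include <string.h>

static float expm1_p6_ref(float z) {
  const float biased_n = z * 0x1.715476p+0f + 0x1.8000FEp23f;  // round n to int
  uint32_t bits;
  memcpy(&bits, &biased_n, sizeof(bits));
  const uint32_t sbits = bits << 23;       // low bits of n land in the exponent
  float s;                                 // s = 2^n
  memcpy(&s, &sbits, sizeof(s));
  const float n = biased_n - 0x1.8000FEp23f;
  float t = n * -0x1.62E440p-1f + z;       // subtract n*ln2, high part
  t = n * 0x1.0105C6p-21f + t;             // low part
  float p = 0x1.6b7338p-10f * t + 0x1.12278Ep-7f;
  p = p * t + 0x1.555716p-5f;
  p = p * t + 0x1.5554B0p-3f;
  p = p * t + 0x1.FFFFFEp-2f;
  p = p * t;
  t *= s;
  s -= 1.0f;
  p = p * t + t;
  return p + s;                            // ~= exp(z) - 1, the kernels' vp + vs
}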
diff --git a/src/f32-velu/gen/velu-avx-rr2-p6-x24.c b/src/f32-velu/gen/velu-avx-rr2-p6-x24.c
new file mode 100644
index 0000000..73011e0
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-p6-x24.c
@@ -0,0 +1,206 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_p6_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E440p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.0105C6p-21f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    x += 24;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
+    const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc6, vt2), vc5);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc4);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    y += 24;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-p6-x32.c b/src/f32-velu/gen/velu-avx-rr2-p6-x32.c
new file mode 100644
index 0000000..00f5af5
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-p6-x32.c
@@ -0,0 +1,227 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_p6_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E440p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.0105C6p-21f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    x += 32;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
+    const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 vs3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
+    const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+    __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc6, vt2), vc5);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc6, vt3), vc5);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc4);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc4);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+
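+    // blendv selects by the sign bit of its mask operand: negative beta*x lanes take the ELU value ve,
+    // non-negative lanes keep the identity branch beta*x.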
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    y += 32;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
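+    // Instead, the 1-7 remaining floats are written with a cascade of 4-, 2- and 1-element stores.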
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-p6-x40.c b/src/f32-velu/gen/velu-avx-rr2-p6-x40.c
new file mode 100644
index 0000000..620192e
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-p6-x40.c
@@ -0,0 +1,248 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_p6_x40(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E440p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.0105C6p-21f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 40 * sizeof(float); n -= 40 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    x += 40;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+    __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
+
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
+    const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 vs3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
+    const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m128 vs4_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
+    const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+    __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
+    __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
+    __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs4_lo), vs4_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+    vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc6, vt2), vc5);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc6, vt3), vc5);
+    __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc6, vt4), vc5);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc4);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc4);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc4);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vs4 = _mm256_sub_ps(vs4, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    y += 40;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-p6-x48.c b/src/f32-velu/gen/velu-avx-rr2-p6-x48.c
new file mode 100644
index 0000000..1f793dc
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-p6-x48.c
@@ -0,0 +1,269 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_p6_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E440p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.0105C6p-21f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    x += 48;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+
+    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
+    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
+    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
+    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
+    __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
+    __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
+
+    const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
+    const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
+    const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m128 vs2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
+    const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m128 vs3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
+    const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m128 vs4_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
+    const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m128 vs5_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
+    const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+
+    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
+    __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
+    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
+    __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
+    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
+    __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
+    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
+    __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
+    __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
+    __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs4_lo), vs4_hi, 1);
+    __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
+    __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs5_lo), vs5_hi, 1);
+
+    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
+    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
+    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
+    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
+    vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
+    vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
+
+    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
+    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
+    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc6, vt2), vc5);
+    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc6, vt3), vc5);
+    __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc6, vt4), vc5);
+    __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc6, vt5), vc5);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc4);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc4);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc4);
+    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc4);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
+    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
+    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vs0 = _mm256_sub_ps(vs0, vone);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vs1 = _mm256_sub_ps(vs1, vone);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vs2 = _mm256_sub_ps(vs2, vone);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vs3 = _mm256_sub_ps(vs3, vone);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vs4 = _mm256_sub_ps(vs4, vone);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vs5 = _mm256_sub_ps(vs5, vone);
+
+    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
+    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
+    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
+    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
+    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
+    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vt5);
+
+    const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_mul_ps(_mm256_add_ps(vp5, vs5), valpha);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    y += 48;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx-rr2-p6-x8.c b/src/f32-velu/gen/velu-avx-rr2-p6-x8.c
new file mode 100644
index 0000000..9a504c9
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx-rr2-p6-x8.c
@@ -0,0 +1,124 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx_rr2_p6_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E440p-1f);
+  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.0105C6p-21f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+  const __m256 vone = _mm256_set1_ps(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
+    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
+    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
+    __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
+    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_sub_ps(vs, vone);
+    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
+
+    const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x16.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x16.c
new file mode 100644
index 0000000..62d9f3f
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x16.c
@@ -0,0 +1,169 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
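+// 16-entry table of 2**(-k/16), k = 0..15, gathered below to help reconstruct s = 2**n in 1/16 steps.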
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
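+  // Same ELU evaluation as the AVX rr2-p6 kernels above, but using FMA and a lut16-based exp:
+  // n is rounded to multiples of 1/16 via the magic bias (ulp = 2**-4), the low 4 index bits gather
+  // 2**(-k/16) from the table, the remaining bits are folded into the exponent field (<< 19), and
+  // exp(t) - 1 then only needs a degree-3 polynomial in t.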
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    x += 16;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x24.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x24.c
new file mode 100644
index 0000000..69b27af
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x24.c
@@ -0,0 +1,187 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    x += 24;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+    const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
+    const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+    __m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    y += 24;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x32.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x32.c
new file mode 100644
index 0000000..9e3c52a
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x32.c
@@ -0,0 +1,205 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    x += 32;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+    const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
+    const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
+    const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
+    const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+    __m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
+    __m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    y += 32;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x40.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x40.c
new file mode 100644
index 0000000..4877c48
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x40.c
@@ -0,0 +1,223 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
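+  // The main loop below evaluates ELU lane-wise: z := max(prescale * x, vsat_cutoff), where the
+  // cutoff is low enough that expm1(z) has effectively saturated to -1 in single precision; the
+  // negative branch computes alpha * expm1(z) from a 16-entry exp2 table plus a degree-3
+  // polynomial, and the final blend keys on the sign bit of beta * x, so negative lanes take the
+  // exponential branch while the remaining lanes keep beta * x.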
+  for (; n >= 40 * sizeof(float); n -= 40 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    x += 40;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+
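+    // The magic bias fixes the binary point of vn so that vn - vmagic_bias equals z * log2(e)
+    // rounded to a multiple of 1/16: the low 4 bits of its integer representation serve as the
+    // table index below, and the same word shifted left by 19 (ven) carries the corresponding
+    // exponent adjustment.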
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+    const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
+    const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
+    const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
+    const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
+    const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
+    const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+
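+    // The gathered table entry plus the shifted integer bits reassemble s := 2**n, while
+    // vt := z - n * ln(2) is the reduced argument (vn holds n after the bias subtraction above).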
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+
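+    // A degree-3 polynomial approximation of exp(t) - 1 is built up and folded together with s
+    // and alpha below, so that ve := alpha * (s * exp(t) - 1) = alpha * expm1(z); vx is rescaled
+    // by beta for the non-negative branch.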
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+    __m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
+    __m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
+    __m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    y += 40;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
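+    // mask_table holds seven -1 entries followed by seven 0 entries; loading 8 entries starting
+    // n bytes before &mask_table[7] yields exactly n / sizeof(float) leading all-ones lanes, so
+    // the maskload below reads only the remaining elements.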
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x48.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x48.c
new file mode 100644
index 0000000..87f6344
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x48.c
@@ -0,0 +1,241 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    x += 48;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+    const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
+    const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
+    const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
+    const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
+    const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
+    const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
+    const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
+    const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+    __m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
+    __m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
+    __m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
+    __m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    y += 48;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x56.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x56.c
new file mode 100644
index 0000000..8f763d4
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x56.c
@@ -0,0 +1,259 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 56 * sizeof(float); n -= 56 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    x += 56;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+    const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
+    const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
+    const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
+    const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
+    const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
+    const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
+    const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
+    const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
+    const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask);
+    const __m256i vl6 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx6, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 19);
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+    __m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
+    __m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
+    __m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
+    __m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
+    __m256 vp6 = _mm256_fmadd_ps(vc3, vt6, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    y += 56;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x64.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x64.c
new file mode 100644
index 0000000..8d01403
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x64.c
@@ -0,0 +1,277 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 64 * sizeof(float); n -= 64 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    x += 64;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+    const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
+    const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
+    const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
+    const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
+    const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
+    const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
+    const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
+    const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
+    const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask);
+    const __m256i vl6 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx6, sizeof(float));
+    const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask);
+    const __m256i vl7 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx7, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 19);
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 19);
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+    __m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
+    __m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
+    __m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
+    __m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
+    __m256 vp6 = _mm256_fmadd_ps(vc3, vt6, vc2);
+    __m256 vp7 = _mm256_fmadd_ps(vc3, vt7, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    y += 64;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x72.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x72.c
new file mode 100644
index 0000000..5861317
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x72.c
@@ -0,0 +1,295 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 72 * sizeof(float); n -= 72 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    __m256 vx8 = _mm256_loadu_ps(x + 64);
+    x += 72;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+    const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+    __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
+
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+    const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
+    const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
+    const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
+    const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
+    const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
+    const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
+    const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
+    const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
+    const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask);
+    const __m256i vl6 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx6, sizeof(float));
+    const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask);
+    const __m256i vl7 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx7, sizeof(float));
+    const __m256i vidx8 = _mm256_and_si256(_mm256_castps_si256(vn8), vindex_mask);
+    const __m256i vl8 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx8, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 19);
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 19);
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+    const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 19);
+    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+    __m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
+    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+    __m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
+    __m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
+    __m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
+    __m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
+    __m256 vp6 = _mm256_fmadd_ps(vc3, vt6, vc2);
+    __m256 vp7 = _mm256_fmadd_ps(vc3, vt7, vc2);
+    __m256 vp8 = _mm256_fmadd_ps(vc3, vt8, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+    vp8 = _mm256_mul_ps(vp8, vt8);
+    vt8 = _mm256_mul_ps(vt8, vs8);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+    vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+    const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
+    vx8 = _mm256_mul_ps(vx8, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+    const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    _mm256_storeu_ps(y + 64, vy8);
+    y += 72;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x8.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x8.c
new file mode 100644
index 0000000..5b14ad9
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x8.c
@@ -0,0 +1,118 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x80.c b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x80.c
new file mode 100644
index 0000000..c15211d
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut16-p3-gather-x80.c
@@ -0,0 +1,313 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p19f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vindex_mask = _mm256_set1_epi32(0xF);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.55561Cp-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 80 * sizeof(float); n -= 80 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    __m256 vx8 = _mm256_loadu_ps(x + 64);
+    __m256 vx9 = _mm256_loadu_ps(x + 72);
+    x += 80;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+    const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
+    const __m256 vz9 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx9, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+    __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
+    __m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
+
+    const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
+    const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
+    const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
+    const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
+    const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
+    const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
+    const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
+    const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
+    const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
+    const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
+    const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
+    const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
+    const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask);
+    const __m256i vl6 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx6, sizeof(float));
+    const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask);
+    const __m256i vl7 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx7, sizeof(float));
+    const __m256i vidx8 = _mm256_and_si256(_mm256_castps_si256(vn8), vindex_mask);
+    const __m256i vl8 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx8, sizeof(float));
+    const __m256i vidx9 = _mm256_and_si256(_mm256_castps_si256(vn9), vindex_mask);
+    const __m256i vl9 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx9, sizeof(float));
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 19);
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 19);
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+    const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 19);
+    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
+    const __m256i ven9 = _mm256_slli_epi32(_mm256_castps_si256(vn9), 19);
+    vn9 = _mm256_sub_ps(vn9, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+    __m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
+    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
+    __m256 vs9 = _mm256_castsi256_ps(_mm256_add_epi32(vl9, ven9));
+    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
+    __m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
+    __m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
+    __m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
+    __m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
+    __m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
+    __m256 vp6 = _mm256_fmadd_ps(vc3, vt6, vc2);
+    __m256 vp7 = _mm256_fmadd_ps(vc3, vt7, vc2);
+    __m256 vp8 = _mm256_fmadd_ps(vc3, vt8, vc2);
+    __m256 vp9 = _mm256_fmadd_ps(vc3, vt9, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+    vp8 = _mm256_mul_ps(vp8, vt8);
+    vt8 = _mm256_mul_ps(vt8, vs8);
+    vp9 = _mm256_mul_ps(vp9, vt9);
+    vt9 = _mm256_mul_ps(vt9, vs9);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+    vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
+    vs9 = _mm256_fmsub_ps(vs9, valpha, valpha);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vt9);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+    const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
+    vx8 = _mm256_mul_ps(vx8, vbeta);
+    const __m256 ve9 = _mm256_fmadd_ps(vp9, valpha, vs9);
+    vx9 = _mm256_mul_ps(vx9, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+    const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
+    const __m256 vy9 = _mm256_blendv_ps(vx9, ve9, vx9);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    _mm256_storeu_ps(y + 64, vy8);
+    _mm256_storeu_ps(y + 72, vy9);
+    y += 80;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
+    const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
+
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x16.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x16.c
new file mode 100644
index 0000000..e984db2
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x16.c
@@ -0,0 +1,168 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    x += 16;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x24.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x24.c
new file mode 100644
index 0000000..a816933
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x24.c
@@ -0,0 +1,186 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    x += 24;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
+    const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    y += 24;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x32.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x32.c
new file mode 100644
index 0000000..339fc0a
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x32.c
@@ -0,0 +1,204 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    x += 32;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
+    const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
+    const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    y += 32;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x40.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x40.c
new file mode 100644
index 0000000..9cea9e6
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x40.c
@@ -0,0 +1,222 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 40 * sizeof(float); n -= 40 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    x += 40;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
+    const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
+    const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
+    const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    y += 40;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x48.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x48.c
new file mode 100644
index 0000000..46a56b9
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x48.c
@@ -0,0 +1,240 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    x += 48;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
+    const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
+    const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
+    const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
+    const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    y += 48;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x56.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x56.c
new file mode 100644
index 0000000..be557e2
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x56.c
@@ -0,0 +1,258 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 56 * sizeof(float); n -= 56 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    x += 56;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
+    const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
+    const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
+    const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
+    const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 21);
+    const __m256i vl6 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn6)));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+    __m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    y += 56;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x64.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x64.c
new file mode 100644
index 0000000..0f88092
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x64.c
@@ -0,0 +1,276 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 64 * sizeof(float); n -= 64 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    x += 64;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
+    const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
+    const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
+    const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
+    const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 21);
+    const __m256i vl6 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn6)));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 21);
+    const __m256i vl7 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn7)));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+    __m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
+    __m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    y += 64;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x72.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x72.c
new file mode 100644
index 0000000..3c2ca84
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x72.c
@@ -0,0 +1,294 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 72 * sizeof(float); n -= 72 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    __m256 vx8 = _mm256_loadu_ps(x + 64);
+    x += 72;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+    const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+    __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
+    const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
+    const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
+    const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
+    const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 21);
+    const __m256i vl6 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn6)));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 21);
+    const __m256i vl7 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn7)));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+    const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 21);
+    const __m256i vl8 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn8)));
+    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+    __m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
+    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+    __m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
+    __m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
+    __m256 vp8 = _mm256_fmadd_ps(vc4, vt8, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+    vp8 = _mm256_mul_ps(vp8, vt8);
+    vt8 = _mm256_mul_ps(vt8, vs8);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+    vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+    const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
+    vx8 = _mm256_mul_ps(vx8, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+    const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    _mm256_storeu_ps(y + 64, vy8);
+    y += 72;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x8.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x8.c
new file mode 100644
index 0000000..c188d51
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x8.c
@@ -0,0 +1,117 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x80.c b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x80.c
new file mode 100644
index 0000000..39cca02
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut4-p4-perm-x80.c
@@ -0,0 +1,312 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p21f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vtable = _mm256_set_ps(
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f,
+    0x1.EE89FAp-1f, 0x1.EA09E6p-1f, 0x1.F06FE0p-1f, 0x1.000000p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.554F9Ap-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.557082p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000002p-1f);
+
+  for (; n >= 80 * sizeof(float); n -= 80 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    __m256 vx8 = _mm256_loadu_ps(x + 64);
+    __m256 vx9 = _mm256_loadu_ps(x + 72);
+    x += 80;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+    const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
+    const __m256 vz9 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx9, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+    __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
+    __m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
+    const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
+    const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
+    const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
+    const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
+    const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
+    const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 21);
+    const __m256i vl6 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn6)));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 21);
+    const __m256i vl7 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn7)));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+    const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 21);
+    const __m256i vl8 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn8)));
+    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
+    const __m256i ven9 = _mm256_slli_epi32(_mm256_castps_si256(vn9), 21);
+    const __m256i vl9 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn9)));
+    vn9 = _mm256_sub_ps(vn9, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+    __m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
+    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
+    __m256 vs9 = _mm256_castsi256_ps(_mm256_add_epi32(vl9, ven9));
+    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+    __m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
+    __m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
+    __m256 vp8 = _mm256_fmadd_ps(vc4, vt8, vc3);
+    __m256 vp9 = _mm256_fmadd_ps(vc4, vt9, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+    vp8 = _mm256_mul_ps(vp8, vt8);
+    vt8 = _mm256_mul_ps(vt8, vs8);
+    vp9 = _mm256_mul_ps(vp9, vt9);
+    vt9 = _mm256_mul_ps(vt9, vs9);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+    vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
+    vs9 = _mm256_fmsub_ps(vs9, valpha, valpha);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vt9);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+    const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
+    vx8 = _mm256_mul_ps(vx8, vbeta);
+    const __m256 ve9 = _mm256_fmadd_ps(vp9, valpha, vs9);
+    vx9 = _mm256_mul_ps(vx9, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+    const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
+    const __m256 vy9 = _mm256_blendv_ps(vx9, ve9, vx9);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    _mm256_storeu_ps(y + 64, vy8);
+    _mm256_storeu_ps(y + 72, vy9);
+    y += 80;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
+    const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
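+    // Store the remaining 1-7 elements in 4-, 2-, and 1-float pieces, moving
+    // the upper lanes of vy down as they are written out.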
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x16.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x16.c
new file mode 100644
index 0000000..7a46908
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x16.c
@@ -0,0 +1,167 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    x += 16;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+
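+    // Round z * log2(e) to a multiple of 1/8 with the magic-bias trick: the low
+    // 3 bits of each vn element index the 8-entry vtable, and the bits above
+    // them carry the integer part of the exponent.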
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+
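+    // Reconstruct s = 2**n from the table value and the shifted bits of vn, and
+    // compute the reduced argument t = z - n * ln(2), with |t| <= ln(2)/16.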
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+
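+    // Approximate exp(t) - 1 as t + c2*t^2 + c3*t^3 + c4*t^4, evaluated in
+    // Horner form and folded into s*t below.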
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+
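+    // Assemble the negative branch e = alpha * (exp(z) - 1) from alpha * p and
+    // alpha * (s - 1); the blend below keys off the sign bit of beta * x to
+    // pick e for negative inputs and beta * x otherwise.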
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
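+    // Load a lane mask with the first n / sizeof(float) lanes enabled:
+    // mask_table holds seven -1 entries followed by seven zeros, and the -n
+    // byte offset slides the load window accordingly.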
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x24.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x24.c
new file mode 100644
index 0000000..894143c
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x24.c
@@ -0,0 +1,185 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    x += 24;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
+    const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    y += 24;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x32.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x32.c
new file mode 100644
index 0000000..ddddb4e
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x32.c
@@ -0,0 +1,203 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    x += 32;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
+    const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
+    const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    y += 32;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x40.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x40.c
new file mode 100644
index 0000000..67882e8
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x40.c
@@ -0,0 +1,221 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 40 * sizeof(float); n -= 40 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    x += 40;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
+    const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
+    const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
+    const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    y += 40;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x48.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x48.c
new file mode 100644
index 0000000..9b5287a
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x48.c
@@ -0,0 +1,239 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    x += 48;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
+    const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
+    const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
+    const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
+    const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    y += 48;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x56.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x56.c
new file mode 100644
index 0000000..808b3f3
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x56.c
@@ -0,0 +1,257 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 56 * sizeof(float); n -= 56 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    x += 56;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
+    const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
+    const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
+    const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
+    const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 20);
+    const __m256i vl6 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn6));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+    __m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    y += 56;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x64.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x64.c
new file mode 100644
index 0000000..3b82586
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x64.c
@@ -0,0 +1,275 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 64 * sizeof(float); n -= 64 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    x += 64;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
+    const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
+    const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
+    const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
+    const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 20);
+    const __m256i vl6 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn6));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 20);
+    const __m256i vl7 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn7));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+    __m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
+    __m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    y += 64;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x72.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x72.c
new file mode 100644
index 0000000..300636a
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x72.c
@@ -0,0 +1,293 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 72 * sizeof(float); n -= 72 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    __m256 vx8 = _mm256_loadu_ps(x + 64);
+    x += 72;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+    const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+    __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
+    const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
+    const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
+    const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
+    const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 20);
+    const __m256i vl6 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn6));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 20);
+    const __m256i vl7 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn7));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+    const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 20);
+    const __m256i vl8 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn8));
+    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+    __m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
+    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+    __m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
+    __m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
+    __m256 vp8 = _mm256_fmadd_ps(vc4, vt8, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+    vp8 = _mm256_mul_ps(vp8, vt8);
+    vt8 = _mm256_mul_ps(vt8, vs8);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+    vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+    const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
+    vx8 = _mm256_mul_ps(vx8, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+    const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    _mm256_storeu_ps(y + 64, vy8);
+    y += 72;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x8.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x8.c
new file mode 100644
index 0000000..2cf3aa0
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x8.c
@@ -0,0 +1,116 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x80.c b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x80.c
new file mode 100644
index 0000000..a519e72
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-lut8-p4-perm-x80.c
@@ -0,0 +1,311 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256i vtable = _mm256_set_epi32(
+    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);
+
+  for (; n >= 80 * sizeof(float); n -= 80 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    __m256 vx8 = _mm256_loadu_ps(x + 64);
+    __m256 vx9 = _mm256_loadu_ps(x + 72);
+    x += 80;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+    const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
+    const __m256 vz9 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx9, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+    __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
+    __m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
+
+    const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
+    const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
+    const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
+    const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
+    const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
+    const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
+    const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 20);
+    const __m256i vl6 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn6));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 20);
+    const __m256i vl7 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn7));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+    const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 20);
+    const __m256i vl8 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn8));
+    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
+    const __m256i ven9 = _mm256_slli_epi32(_mm256_castps_si256(vn9), 20);
+    const __m256i vl9 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn9));
+    vn9 = _mm256_sub_ps(vn9, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+    __m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
+    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
+    __m256 vs9 = _mm256_castsi256_ps(_mm256_add_epi32(vl9, ven9));
+    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
+    __m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
+    __m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
+    __m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
+    __m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
+    __m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
+    __m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
+    __m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
+    __m256 vp8 = _mm256_fmadd_ps(vc4, vt8, vc3);
+    __m256 vp9 = _mm256_fmadd_ps(vc4, vt9, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+    vp8 = _mm256_mul_ps(vp8, vt8);
+    vt8 = _mm256_mul_ps(vt8, vs8);
+    vp9 = _mm256_mul_ps(vp9, vt9);
+    vt9 = _mm256_mul_ps(vt9, vs9);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+    vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
+    vs9 = _mm256_fmsub_ps(vs9, valpha, valpha);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vt9);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+    const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
+    vx8 = _mm256_mul_ps(vx8, vbeta);
+    const __m256 ve9 = _mm256_fmadd_ps(vp9, valpha, vs9);
+    vx9 = _mm256_mul_ps(vx9, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+    const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
+    const __m256 vy9 = _mm256_blendv_ps(vx9, ve9, vx9);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    _mm256_storeu_ps(y + 64, vy8);
+    _mm256_storeu_ps(y + 72, vy9);
+    y += 80;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
+    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
+    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x16.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x16.c
new file mode 100644
index 0000000..c1fb793
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x16.c
@@ -0,0 +1,169 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    x += 16;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    y += 16;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x24.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x24.c
new file mode 100644
index 0000000..34e0395
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x24.c
@@ -0,0 +1,187 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    x += 24;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+    __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    y += 24;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x32.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x32.c
new file mode 100644
index 0000000..8d2e9c1
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x32.c
@@ -0,0 +1,205 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    x += 32;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+    __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
+    __m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    y += 32;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x40.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x40.c
new file mode 100644
index 0000000..a279053
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x40.c
@@ -0,0 +1,223 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x40(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 40 * sizeof(float); n -= 40 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    x += 40;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+    __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
+    __m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
+    __m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    y += 40;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x48.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x48.c
new file mode 100644
index 0000000..bc60458
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x48.c
@@ -0,0 +1,241 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    x += 48;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+    __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
+    __m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
+    __m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
+    __m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    y += 48;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x56.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x56.c
new file mode 100644
index 0000000..c5f5a3d
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x56.c
@@ -0,0 +1,259 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x56(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 56 * sizeof(float); n -= 56 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    x += 56;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+    __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
+    __m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
+    __m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
+    __m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
+    __m256 vp6 = _mm256_fmadd_ps(vc6, vt6, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    y += 56;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x64.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x64.c
new file mode 100644
index 0000000..078e5f4
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x64.c
@@ -0,0 +1,277 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x64(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 64 * sizeof(float); n -= 64 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    x += 64;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+    __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
+    __m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
+    __m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
+    __m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
+    __m256 vp6 = _mm256_fmadd_ps(vc6, vt6, vc5);
+    __m256 vp7 = _mm256_fmadd_ps(vc6, vt7, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    y += 64;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x72.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x72.c
new file mode 100644
index 0000000..6437c97
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x72.c
@@ -0,0 +1,295 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x72(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 72 * sizeof(float); n -= 72 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    __m256 vx8 = _mm256_loadu_ps(x + 64);
+    x += 72;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+    const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+    __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+    __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
+    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+    __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
+    __m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
+    __m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
+    __m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
+    __m256 vp6 = _mm256_fmadd_ps(vc6, vt6, vc5);
+    __m256 vp7 = _mm256_fmadd_ps(vc6, vt7, vc5);
+    __m256 vp8 = _mm256_fmadd_ps(vc6, vt8, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+    vp8 = _mm256_mul_ps(vp8, vt8);
+    vt8 = _mm256_mul_ps(vt8, vs8);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+    vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+    const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
+    vx8 = _mm256_mul_ps(vx8, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+    const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    _mm256_storeu_ps(y + 64, vy8);
+    y += 72;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x8.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x8.c
new file mode 100644
index 0000000..b83c147
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x8.c
@@ -0,0 +1,116 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx2-rr1-p6-x80.c b/src/f32-velu/gen/velu-avx2-rr1-p6-x80.c
new file mode 100644
index 0000000..01d238a
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx2-rr1-p6-x80.c
@@ -0,0 +1,313 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx2-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_velu_ukernel__avx2_rr1_p6_x80(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
+  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
+  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);
+
+  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
+  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
+  const __m256 vc6 = _mm256_set1_ps(0x1.6b7338p-10f);
+  const __m256 vc5 = _mm256_set1_ps(0x1.12278Ep-7f);
+  const __m256 vc4 = _mm256_set1_ps(0x1.555716p-5f);
+  const __m256 vc3 = _mm256_set1_ps(0x1.5554B0p-3f);
+  const __m256 vc2 = _mm256_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 80 * sizeof(float); n -= 80 * sizeof(float)) {
+    __m256 vx0 = _mm256_loadu_ps(x);
+    __m256 vx1 = _mm256_loadu_ps(x + 8);
+    __m256 vx2 = _mm256_loadu_ps(x + 16);
+    __m256 vx3 = _mm256_loadu_ps(x + 24);
+    __m256 vx4 = _mm256_loadu_ps(x + 32);
+    __m256 vx5 = _mm256_loadu_ps(x + 40);
+    __m256 vx6 = _mm256_loadu_ps(x + 48);
+    __m256 vx7 = _mm256_loadu_ps(x + 56);
+    __m256 vx8 = _mm256_loadu_ps(x + 64);
+    __m256 vx9 = _mm256_loadu_ps(x + 72);
+    x += 80;
+
+    const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
+    const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
+    const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
+    const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
+    const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
+    const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
+    const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
+    const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
+    const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
+    const __m256 vz9 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx9, vprescale));
+
+    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
+    __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
+    __m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
+
+    __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+    __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+    __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+    __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+    __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
+    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
+    __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
+    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
+    __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
+    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
+    __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
+    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
+    __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
+    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
+    __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
+    vn9 = _mm256_sub_ps(vn9, vmagic_bias);
+
+    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
+    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
+    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
+
+    __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
+    __m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
+    __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
+    __m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
+    __m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
+    __m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
+    __m256 vp6 = _mm256_fmadd_ps(vc6, vt6, vc5);
+    __m256 vp7 = _mm256_fmadd_ps(vc6, vt7, vc5);
+    __m256 vp8 = _mm256_fmadd_ps(vc6, vt8, vc5);
+    __m256 vp9 = _mm256_fmadd_ps(vc6, vt9, vc5);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vc4);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
+
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
+
+    vp0 = _mm256_mul_ps(vp0, vt0);
+    vt0 = _mm256_mul_ps(vt0, vs0);
+    vp1 = _mm256_mul_ps(vp1, vt1);
+    vt1 = _mm256_mul_ps(vt1, vs1);
+    vp2 = _mm256_mul_ps(vp2, vt2);
+    vt2 = _mm256_mul_ps(vt2, vs2);
+    vp3 = _mm256_mul_ps(vp3, vt3);
+    vt3 = _mm256_mul_ps(vt3, vs3);
+    vp4 = _mm256_mul_ps(vp4, vt4);
+    vt4 = _mm256_mul_ps(vt4, vs4);
+    vp5 = _mm256_mul_ps(vp5, vt5);
+    vt5 = _mm256_mul_ps(vt5, vs5);
+    vp6 = _mm256_mul_ps(vp6, vt6);
+    vt6 = _mm256_mul_ps(vt6, vs6);
+    vp7 = _mm256_mul_ps(vp7, vt7);
+    vt7 = _mm256_mul_ps(vt7, vs7);
+    vp8 = _mm256_mul_ps(vp8, vt8);
+    vt8 = _mm256_mul_ps(vt8, vs8);
+    vp9 = _mm256_mul_ps(vp9, vt9);
+    vt9 = _mm256_mul_ps(vt9, vs9);
+
+    vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
+    vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
+    vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
+    vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
+    vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
+    vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
+    vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
+    vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
+    vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
+    vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
+    vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
+    vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
+    vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
+    vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
+    vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
+    vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
+    vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
+    vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
+    vs9 = _mm256_fmsub_ps(vs9, valpha, valpha);
+    vp9 = _mm256_fmadd_ps(vp9, vt9, vt9);
+
+    const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
+    vx0 = _mm256_mul_ps(vx0, vbeta);
+    const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
+    vx1 = _mm256_mul_ps(vx1, vbeta);
+    const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
+    vx2 = _mm256_mul_ps(vx2, vbeta);
+    const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
+    vx3 = _mm256_mul_ps(vx3, vbeta);
+    const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
+    vx4 = _mm256_mul_ps(vx4, vbeta);
+    const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
+    vx5 = _mm256_mul_ps(vx5, vbeta);
+    const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
+    vx6 = _mm256_mul_ps(vx6, vbeta);
+    const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
+    vx7 = _mm256_mul_ps(vx7, vbeta);
+    const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
+    vx8 = _mm256_mul_ps(vx8, vbeta);
+    const __m256 ve9 = _mm256_fmadd_ps(vp9, valpha, vs9);
+    vx9 = _mm256_mul_ps(vx9, vbeta);
+
+    const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
+    const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
+    const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
+    const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
+    const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
+    const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
+    const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
+    const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
+    const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
+    const __m256 vy9 = _mm256_blendv_ps(vx9, ve9, vx9);
+
+    _mm256_storeu_ps(y, vy0);
+    _mm256_storeu_ps(y + 8, vy1);
+    _mm256_storeu_ps(y + 16, vy2);
+    _mm256_storeu_ps(y + 24, vy3);
+    _mm256_storeu_ps(y + 32, vy4);
+    _mm256_storeu_ps(y + 40, vy5);
+    _mm256_storeu_ps(y + 48, vy6);
+    _mm256_storeu_ps(y + 56, vy7);
+    _mm256_storeu_ps(y + 64, vy8);
+    _mm256_storeu_ps(y + 72, vy9);
+    y += 80;
+  }
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m256 vx = _mm256_loadu_ps(x);
+    x += 8;
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    _mm256_storeu_ps(y, vy);
+    y += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 7 * sizeof(float));
+    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
+
+    __m256 vx = _mm256_maskload_ps(x, vmask);
+
+    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
+
+    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+    vn = _mm256_sub_ps(vn, vmagic_bias);
+
+    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
+    vp = _mm256_fmadd_ps(vp, vt, vc4);
+    vp = _mm256_fmadd_ps(vp, vt, vc3);
+    vp = _mm256_fmadd_ps(vp, vt, vc2);
+    vp = _mm256_mul_ps(vp, vt);
+
+    vt = _mm256_mul_ps(vt, vs);
+    vs = _mm256_fmsub_ps(vs, valpha, valpha);
+    vp = _mm256_fmadd_ps(vp, vt, vt);
+    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
+
+    vx = _mm256_mul_ps(vx, vbeta);
+    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
+
+    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
+    __m128 vy_lo = _mm256_castps256_ps128(vy);
+    if (n & (4 * sizeof(float))) {
+      _mm_storeu_ps(y, vy_lo);
+      vy_lo = _mm256_extractf128_ps(vy, 1);
+      y += 4;
+    }
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy_lo);
+      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy_lo);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x112.c b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x112.c
new file mode 100644
index 0000000..8721020
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x112.c
@@ -0,0 +1,238 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 112 * sizeof(float); n -= 112 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    __m512 vx4 = _mm512_loadu_ps(x + 64);
+    __m512 vx5 = _mm512_loadu_ps(x + 80);
+    __m512 vx6 = _mm512_loadu_ps(x + 96);
+    x += 112;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+    const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
+    const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
+    const __m512 vz6 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx6, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
+
+    const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
+    const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
+    const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
+    const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
+    const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
+    const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
+    const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
+    const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
+    const __m512i ven4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 19);
+    const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
+    const __m512i ven5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 19);
+    const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
+    const __m512i ven6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 19);
+    const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+    __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));
+    vn4 = _mm512_sub_ps(vn4, vmagic_bias);
+    __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ven5));
+    vn5 = _mm512_sub_ps(vn5, vmagic_bias);
+    __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ven6));
+    vn6 = _mm512_sub_ps(vn6, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
+    __m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
+    __m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
+    __m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
+    __m512 vp4 = _mm512_fmadd_ps(vc3, vt4, vc2);
+    __m512 vp5 = _mm512_fmadd_ps(vc3, vt5, vc2);
+    __m512 vp6 = _mm512_fmadd_ps(vc3, vt6, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+    vp4 = _mm512_mul_ps(vp4, vt4);
+    vt4 = _mm512_mul_ps(vt4, vs4);
+    vp5 = _mm512_mul_ps(vp5, vt5);
+    vt5 = _mm512_mul_ps(vt5, vs5);
+    vp6 = _mm512_mul_ps(vp6, vt6);
+    vt6 = _mm512_mul_ps(vt6, vs6);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+    vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
+    vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
+    vs6 = _mm512_fmsub_ps(vs6, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vt6);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+    __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
+    const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
+    __m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
+    const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
+    __m512 vy6 = _mm512_fmadd_ps(vp6, valpha, vs6);
+    const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+    vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
+    vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
+    vy6 = _mm512_mask_mul_ps(vy6, vsign6, vx6, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    _mm512_storeu_ps(y + 64, vy4);
+    _mm512_storeu_ps(y + 80, vy5);
+    _mm512_storeu_ps(y + 96, vy6);
+    y += 112;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x128.c b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x128.c
new file mode 100644
index 0000000..487252c
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x128.c
@@ -0,0 +1,255 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 128 * sizeof(float); n -= 128 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    __m512 vx4 = _mm512_loadu_ps(x + 64);
+    __m512 vx5 = _mm512_loadu_ps(x + 80);
+    __m512 vx6 = _mm512_loadu_ps(x + 96);
+    __m512 vx7 = _mm512_loadu_ps(x + 112);
+    x += 128;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+    const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
+    const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
+    const __m512 vz6 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx6, vprescale));
+    const __m512 vz7 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx7, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m512 vn7 = _mm512_fmadd_ps(vz7, vlog2e, vmagic_bias);
+
+    const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
+    const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
+    const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
+    const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
+    const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
+    const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
+    const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
+    const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
+    const __m512i ven4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 19);
+    const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
+    const __m512i ven5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 19);
+    const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
+    const __m512i ven6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 19);
+    const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
+    const __m512i ven7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 19);
+    const __m512i vl7 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn7), vtable);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+    __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));
+    vn4 = _mm512_sub_ps(vn4, vmagic_bias);
+    __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ven5));
+    vn5 = _mm512_sub_ps(vn5, vmagic_bias);
+    __m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ven6));
+    vn6 = _mm512_sub_ps(vn6, vmagic_bias);
+    __m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ven7));
+    vn7 = _mm512_sub_ps(vn7, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vz7);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
+    __m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
+    __m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
+    __m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
+    __m512 vp4 = _mm512_fmadd_ps(vc3, vt4, vc2);
+    __m512 vp5 = _mm512_fmadd_ps(vc3, vt5, vc2);
+    __m512 vp6 = _mm512_fmadd_ps(vc3, vt6, vc2);
+    __m512 vp7 = _mm512_fmadd_ps(vc3, vt7, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+    vp4 = _mm512_mul_ps(vp4, vt4);
+    vt4 = _mm512_mul_ps(vt4, vs4);
+    vp5 = _mm512_mul_ps(vp5, vt5);
+    vt5 = _mm512_mul_ps(vt5, vs5);
+    vp6 = _mm512_mul_ps(vp6, vt6);
+    vt6 = _mm512_mul_ps(vt6, vs6);
+    vp7 = _mm512_mul_ps(vp7, vt7);
+    vt7 = _mm512_mul_ps(vt7, vs7);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+    vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
+    vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
+    vs6 = _mm512_fmsub_ps(vs6, valpha, valpha);
+    vs7 = _mm512_fmsub_ps(vs7, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vt6);
+    vp7 = _mm512_fmadd_ps(vp7, vt7, vt7);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+    __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
+    const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
+    __m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
+    const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
+    __m512 vy6 = _mm512_fmadd_ps(vp6, valpha, vs6);
+    const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US);
+    __m512 vy7 = _mm512_fmadd_ps(vp7, valpha, vs7);
+    const __mmask16 vsign7 = _mm512_cmp_ps_mask(vx7, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+    vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
+    vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
+    vy6 = _mm512_mask_mul_ps(vy6, vsign6, vx6, vbeta);
+    vy7 = _mm512_mask_mul_ps(vy7, vsign7, vx7, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    _mm512_storeu_ps(y + 64, vy4);
+    _mm512_storeu_ps(y + 80, vy5);
+    _mm512_storeu_ps(y + 96, vy6);
+    _mm512_storeu_ps(y + 112, vy7);
+    y += 128;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x16.c b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x16.c
new file mode 100644
index 0000000..1e69096
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x16.c
@@ -0,0 +1,102 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x32.c b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x32.c
new file mode 100644
index 0000000..6a8b8a4
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x32.c
@@ -0,0 +1,153 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    x += 32;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+
+    const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
+    const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
+    const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
+    const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
+    __m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+
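+    // Here vs = alpha * (s - 1) and vp = s * (t + c2*t^2 + c3*t^3), so the FMA below yields
+    // alpha * (exp(z) - 1); lanes with x >= 0 (the NLT predicate also routes NaN here) are
+    // then overwritten with beta * x.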
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    y += 32;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
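+    // One mask bit per remaining element: the masked load and store touch only those lanes.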
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x48.c b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x48.c
new file mode 100644
index 0000000..d912697
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x48.c
@@ -0,0 +1,170 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    x += 48;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+
+    const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
+    const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
+    const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
+    const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
+    const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
+    const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
+    __m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
+    __m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    y += 48;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x64.c b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x64.c
new file mode 100644
index 0000000..d291dcb
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x64.c
@@ -0,0 +1,187 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 64 * sizeof(float); n -= 64 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    x += 64;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+
+    const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
+    const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
+    const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
+    const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
+    const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
+    const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
+    const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
+    const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
+    __m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
+    __m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
+    __m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    y += 64;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x80.c b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x80.c
new file mode 100644
index 0000000..96cf698
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x80.c
@@ -0,0 +1,204 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 80 * sizeof(float); n -= 80 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    __m512 vx4 = _mm512_loadu_ps(x + 64);
+    x += 80;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+    const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
+
+    const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
+    const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
+    const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
+    const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
+    const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
+    const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
+    const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
+    const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
+    const __m512i ven4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 19);
+    const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+    __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));
+    vn4 = _mm512_sub_ps(vn4, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
+    __m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
+    __m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
+    __m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
+    __m512 vp4 = _mm512_fmadd_ps(vc3, vt4, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+    vp4 = _mm512_mul_ps(vp4, vt4);
+    vt4 = _mm512_mul_ps(vt4, vs4);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+    vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+    __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
+    const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+    vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    _mm512_storeu_ps(y + 64, vy4);
+    y += 80;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x96.c b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x96.c
new file mode 100644
index 0000000..3c59483
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-lut16-p3-perm-x96.c
@@ -0,0 +1,221 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p19f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512i vtable = _mm512_set_epi32(
+    0x3F7D257D, 0x3F7AC0C7, 0x3F78CCDF, 0x3F7744FD, 0x3F76248C, 0x3F75672A, 0x3F7508A4, 0x3F7504F3,
+    0x3F75583F, 0x3F75FED7, 0x3F76F532, 0x3F7837F0, 0x3F79C3D3, 0x3F7B95C2, 0x3F7DAAC3, 0x3F800000);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.55561Cp-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.0001ECp-1f);
+
+  for (; n >= 96 * sizeof(float); n -= 96 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    __m512 vx4 = _mm512_loadu_ps(x + 64);
+    __m512 vx5 = _mm512_loadu_ps(x + 80);
+    x += 96;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+    const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
+    const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
+
+    const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
+    const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
+    const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
+    const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
+    const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
+    const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
+    const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
+    const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
+    const __m512i ven4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 19);
+    const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
+    const __m512i ven5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 19);
+    const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+    __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));
+    vn4 = _mm512_sub_ps(vn4, vmagic_bias);
+    __m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ven5));
+    vn5 = _mm512_sub_ps(vn5, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
+    __m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
+    __m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
+    __m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
+    __m512 vp4 = _mm512_fmadd_ps(vc3, vt4, vc2);
+    __m512 vp5 = _mm512_fmadd_ps(vc3, vt5, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+    vp4 = _mm512_mul_ps(vp4, vt4);
+    vt4 = _mm512_mul_ps(vt4, vs4);
+    vp5 = _mm512_mul_ps(vp5, vt5);
+    vt5 = _mm512_mul_ps(vt5, vs5);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+    vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
+    vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+    __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
+    const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
+    __m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
+    const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+    vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
+    vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    _mm512_storeu_ps(y + 64, vy4);
+    _mm512_storeu_ps(y + 80, vy5);
+    y += 96;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
+    __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-p6-x112.c b/src/f32-velu/gen/velu-avx512f-rr1-p6-x112.c
new file mode 100644
index 0000000..bb50c21
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-p6-x112.c
@@ -0,0 +1,249 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x112(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
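+  // ELU: y = alpha * (exp(z) - 1), z = prescale * x, on lanes with x < 0 and y = beta * x
+  // elsewhere. exp(z) uses the rr1-p6 scheme: n = z * log2(e) is rounded to an integer via the
+  // magic bias, s = 2^n is obtained by shifting n straight into the exponent field, and
+  // exp(t) - 1 for t = z - n * ln2 is approximated by a degree-6 polynomial.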
+  for (; n >= 112 * sizeof(float); n -= 112 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    __m512 vx4 = _mm512_loadu_ps(x + 64);
+    __m512 vx5 = _mm512_loadu_ps(x + 80);
+    __m512 vx6 = _mm512_loadu_ps(x + 96);
+    x += 112;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+    const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
+    const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
+    const __m512 vz6 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx6, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+    __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
+    vn4 = _mm512_sub_ps(vn4, vmagic_bias);
+    __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
+    vn5 = _mm512_sub_ps(vn5, vmagic_bias);
+    __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
+    vn6 = _mm512_sub_ps(vn6, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
+
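+    // Horner evaluation of c2 + c3*t + ... + c6*t^4; the multiplies and FMAs further below turn
+    // this into s * (t + c2*t^2 + ... + c6*t^6) ~= s * (exp(t) - 1).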
+    __m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
+    __m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
+    __m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
+    __m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
+    __m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5);
+    __m512 vp5 = _mm512_fmadd_ps(vc6, vt5, vc5);
+    __m512 vp6 = _mm512_fmadd_ps(vc6, vt6, vc5);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+    vp4 = _mm512_mul_ps(vp4, vt4);
+    vt4 = _mm512_mul_ps(vt4, vs4);
+    vp5 = _mm512_mul_ps(vp5, vt5);
+    vt5 = _mm512_mul_ps(vt5, vs5);
+    vp6 = _mm512_mul_ps(vp6, vt6);
+    vt6 = _mm512_mul_ps(vt6, vs6);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+    vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
+    vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
+    vs6 = _mm512_fmsub_ps(vs6, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vt6);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+    __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
+    const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
+    __m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
+    const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
+    __m512 vy6 = _mm512_fmadd_ps(vp6, valpha, vs6);
+    const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+    vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
+    vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
+    vy6 = _mm512_mask_mul_ps(vy6, vsign6, vx6, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    _mm512_storeu_ps(y + 64, vy4);
+    _mm512_storeu_ps(y + 80, vy5);
+    _mm512_storeu_ps(y + 96, vy6);
+    y += 112;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-p6-x128.c b/src/f32-velu/gen/velu-avx512f-rr1-p6-x128.c
new file mode 100644
index 0000000..303ef29
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-p6-x128.c
@@ -0,0 +1,267 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x128(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 128 * sizeof(float); n -= 128 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    __m512 vx4 = _mm512_loadu_ps(x + 64);
+    __m512 vx5 = _mm512_loadu_ps(x + 80);
+    __m512 vx6 = _mm512_loadu_ps(x + 96);
+    __m512 vx7 = _mm512_loadu_ps(x + 112);
+    x += 128;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+    const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
+    const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
+    const __m512 vz6 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx6, vprescale));
+    const __m512 vz7 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx7, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
+    __m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
+    __m512 vn7 = _mm512_fmadd_ps(vz7, vlog2e, vmagic_bias);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+    __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
+    vn4 = _mm512_sub_ps(vn4, vmagic_bias);
+    __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
+    vn5 = _mm512_sub_ps(vn5, vmagic_bias);
+    __m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
+    vn6 = _mm512_sub_ps(vn6, vmagic_bias);
+    __m512 vs7 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn7), 23));
+    vn7 = _mm512_sub_ps(vn7, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
+    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
+    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vz7);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
+    __m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
+    __m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
+    __m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
+    __m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5);
+    __m512 vp5 = _mm512_fmadd_ps(vc6, vt5, vc5);
+    __m512 vp6 = _mm512_fmadd_ps(vc6, vt6, vc5);
+    __m512 vp7 = _mm512_fmadd_ps(vc6, vt7, vc5);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
+    vp7 = _mm512_fmadd_ps(vp7, vt7, vc4);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
+    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
+    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+    vp4 = _mm512_mul_ps(vp4, vt4);
+    vt4 = _mm512_mul_ps(vt4, vs4);
+    vp5 = _mm512_mul_ps(vp5, vt5);
+    vt5 = _mm512_mul_ps(vt5, vs5);
+    vp6 = _mm512_mul_ps(vp6, vt6);
+    vt6 = _mm512_mul_ps(vt6, vs6);
+    vp7 = _mm512_mul_ps(vp7, vt7);
+    vt7 = _mm512_mul_ps(vt7, vs7);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+    vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
+    vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
+    vs6 = _mm512_fmsub_ps(vs6, valpha, valpha);
+    vs7 = _mm512_fmsub_ps(vs7, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
+    vp6 = _mm512_fmadd_ps(vp6, vt6, vt6);
+    vp7 = _mm512_fmadd_ps(vp7, vt7, vt7);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+    __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
+    const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
+    __m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
+    const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
+    __m512 vy6 = _mm512_fmadd_ps(vp6, valpha, vs6);
+    const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US);
+    __m512 vy7 = _mm512_fmadd_ps(vp7, valpha, vs7);
+    const __mmask16 vsign7 = _mm512_cmp_ps_mask(vx7, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+    vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
+    vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
+    vy6 = _mm512_mask_mul_ps(vy6, vsign6, vx6, vbeta);
+    vy7 = _mm512_mask_mul_ps(vy7, vsign7, vx7, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    _mm512_storeu_ps(y + 64, vy4);
+    _mm512_storeu_ps(y + 80, vy5);
+    _mm512_storeu_ps(y + 96, vy6);
+    _mm512_storeu_ps(y + 112, vy7);
+    y += 128;
+  }
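+  // Leftover full 16-float vectors are handled one register at a time.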
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
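+  // Final 1..15 floats: a lane mask keeps the masked load and store within bounds.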
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-p6-x16.c b/src/f32-velu/gen/velu-avx512f-rr1-p6-x16.c
new file mode 100644
index 0000000..2247f7f
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-p6-x16.c
@@ -0,0 +1,104 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
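+  // Constants for approximating exp(z) as 2^n * p(t) with a single-constant (rr1) range reduction and a degree-6 polynomial.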
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
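+    // Clamp the prescaled input from below: past the cutoff the ELU output saturates at -alpha.
+    // vsign marks the lanes with x >= 0, which take the linear beta*x branch at the end.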
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
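+    // n = round(z / ln2) via the magic-bias trick; shifting n's integer bits into the exponent field rebuilds s = 2^n.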
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
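+    // t = z - n*ln2 is the reduction remainder; the Horner chain below evaluates
+    // exp(t) ~= 1 + t + t^2*(c2 + t*(c3 + t*(c4 + t*(c5 + t*c6)))).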
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
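+    // Reassemble y = alpha*(exp(z) - 1) as alpha*(s - 1) + alpha*s*(t + t^2*p), with s = 2^n;
+    // lanes where x >= 0 are then overwritten with the linear branch beta*x.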
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-p6-x32.c b/src/f32-velu/gen/velu-avx512f-rr1-p6-x32.c
new file mode 100644
index 0000000..d0f86dc
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-p6-x32.c
@@ -0,0 +1,159 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x32(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    x += 32;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
+    __m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    y += 32;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-p6-x48.c b/src/f32-velu/gen/velu-avx512f-rr1-p6-x48.c
new file mode 100644
index 0000000..1ffbb16
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-p6-x48.c
@@ -0,0 +1,177 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x48(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    x += 48;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
+    __m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
+    __m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    y += 48;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-p6-x64.c b/src/f32-velu/gen/velu-avx512f-rr1-p6-x64.c
new file mode 100644
index 0000000..b53b12b
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-p6-x64.c
@@ -0,0 +1,195 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x64(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 64 * sizeof(float); n -= 64 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    x += 64;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
+    __m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
+    __m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
+    __m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    y += 64;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-p6-x80.c b/src/f32-velu/gen/velu-avx512f-rr1-p6-x80.c
new file mode 100644
index 0000000..16af097
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-p6-x80.c
@@ -0,0 +1,213 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x80(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 80 * sizeof(float); n -= 80 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    __m512 vx4 = _mm512_loadu_ps(x + 64);
+    x += 80;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+    const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+    __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
+    vn4 = _mm512_sub_ps(vn4, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
+    __m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
+    __m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
+    __m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
+    __m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+    vp4 = _mm512_mul_ps(vp4, vt4);
+    vt4 = _mm512_mul_ps(vt4, vs4);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+    vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+    __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
+    const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+    vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    _mm512_storeu_ps(y + 64, vy4);
+    y += 80;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-avx512f-rr1-p6-x96.c b/src/f32-velu/gen/velu-avx512f-rr1-p6-x96.c
new file mode 100644
index 0000000..115389b
--- /dev/null
+++ b/src/f32-velu/gen/velu-avx512f-rr1-p6-x96.c
@@ -0,0 +1,231 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/avx512f-rr1-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vunary.h>
+
+
+void xnn_f32_velu_ukernel__avx512f_rr1_p6_x96(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+
+  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
+  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
+  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));
+
+  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
+  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
+  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
+  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
+  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
+  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
+  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
+  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);
+
+  for (; n >= 96 * sizeof(float); n -= 96 * sizeof(float)) {
+    __m512 vx0 = _mm512_loadu_ps(x);
+    __m512 vx1 = _mm512_loadu_ps(x + 16);
+    __m512 vx2 = _mm512_loadu_ps(x + 32);
+    __m512 vx3 = _mm512_loadu_ps(x + 48);
+    __m512 vx4 = _mm512_loadu_ps(x + 64);
+    __m512 vx5 = _mm512_loadu_ps(x + 80);
+    x += 96;
+
+    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
+    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
+    const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
+    const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
+    const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
+    const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
+
+    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
+    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
+    __m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
+    __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
+    __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
+    __m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
+
+    __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
+    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
+    __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
+    vn1 = _mm512_sub_ps(vn1, vmagic_bias);
+    __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
+    vn2 = _mm512_sub_ps(vn2, vmagic_bias);
+    __m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
+    vn3 = _mm512_sub_ps(vn3, vmagic_bias);
+    __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
+    vn4 = _mm512_sub_ps(vn4, vmagic_bias);
+    __m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
+    vn5 = _mm512_sub_ps(vn5, vmagic_bias);
+
+    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
+    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
+    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
+    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
+    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
+    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
+
+    __m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
+    __m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
+    __m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
+    __m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
+    __m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5);
+    __m512 vp5 = _mm512_fmadd_ps(vc6, vt5, vc5);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
+
+    vp0 = _mm512_mul_ps(vp0, vt0);
+    vt0 = _mm512_mul_ps(vt0, vs0);
+    vp1 = _mm512_mul_ps(vp1, vt1);
+    vt1 = _mm512_mul_ps(vt1, vs1);
+    vp2 = _mm512_mul_ps(vp2, vt2);
+    vt2 = _mm512_mul_ps(vt2, vs2);
+    vp3 = _mm512_mul_ps(vp3, vt3);
+    vt3 = _mm512_mul_ps(vt3, vs3);
+    vp4 = _mm512_mul_ps(vp4, vt4);
+    vt4 = _mm512_mul_ps(vt4, vs4);
+    vp5 = _mm512_mul_ps(vp5, vt5);
+    vt5 = _mm512_mul_ps(vt5, vs5);
+
+    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
+    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
+    vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
+    vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
+    vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
+    vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
+
+    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
+    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
+    vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
+    vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
+    vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
+    vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
+
+    const __m512 vzero = _mm512_setzero_ps();
+    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
+    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
+    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
+    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
+    __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
+    const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
+    __m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
+    const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
+    __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
+    const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
+    __m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
+    const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
+
+    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
+    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
+    vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
+    vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
+    vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
+    vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
+
+    _mm512_storeu_ps(y, vy0);
+    _mm512_storeu_ps(y + 16, vy1);
+    _mm512_storeu_ps(y + 32, vy2);
+    _mm512_storeu_ps(y + 48, vy3);
+    _mm512_storeu_ps(y + 64, vy4);
+    _mm512_storeu_ps(y + 80, vy5);
+    y += 96;
+  }
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m512 vx = _mm512_loadu_ps(x);
+    x += 16;
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_storeu_ps(y, vy);
+    y += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    assert(n >= 1 * sizeof(float));
+    assert(n <= 15 * sizeof(float));
+    // Prepare mask for valid 32-bit elements (depends on n).
+    n >>= 2 /* log2(sizeof(float)) */;
+    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
+
+    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
+
+    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
+    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
+
+    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
+    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
+    vn = _mm512_sub_ps(vn, vmagic_bias);
+
+    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
+
+    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
+    vp = _mm512_fmadd_ps(vp, vt, vc4);
+    vp = _mm512_fmadd_ps(vp, vt, vc3);
+    vp = _mm512_fmadd_ps(vp, vt, vc2);
+    vp = _mm512_mul_ps(vp, vt);
+
+    vt = _mm512_mul_ps(vt, vs);
+    vs = _mm512_fmsub_ps(vs, valpha, valpha);
+    vp = _mm512_fmadd_ps(vp, vt, vt);
+    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
+
+    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
+
+    _mm512_mask_storeu_ps(y, vmask, vy);
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x12.c b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x12.c
new file mode 100644
index 0000000..470da06
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x12.c
@@ -0,0 +1,221 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
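+  // exp(z) ~= 2^n * p(t): n is rounded to a multiple of 1/16, its low 4 bits index the 16-entry table of 2^(-k/16)
+  // while the higher bits are shifted into the exponent field, and p(t) is a degree-3 polynomial in the remainder t
+  // left by a two-constant (Cody-Waite) reduction of ln2.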
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
+    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
+
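+    // Gather the table values: each 64-bit lane of the index vectors packs two byte offsets (k * 4) into the table.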
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
+    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
+    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
+    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
+    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
+    float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+  }
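+  // Leftover full groups of 4 floats.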
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
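+  // Tail of 1..3 floats: the result is computed for a full vector and only the valid lanes are stored.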
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x16.c b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x16.c
new file mode 100644
index 0000000..f3748ce
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x16.c
@@ -0,0 +1,247 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
+    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
+    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
+    const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
+    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
+    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
+    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
+    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
+    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
+    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
+    int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
+    vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
+    const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
+    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
+    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
+    float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
+    float32x4_t vpCDEF = vmlaq_f32(vc2, vc3, vtCDEF);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
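Note: the rr2-lut16-p3 variants approximate exp(z) for z = max(prescale*x, sat_cutoff) <= 0 by splitting z into n*ln2 + t with n a multiple of 1/16. A magic-bias add leaves round(z*16/ln2) in the low bits of the float, the low 4 bits index the 16-entry table of 2^(-k/16) bit patterns, the remaining integer part is shifted into the exponent field, and a cubic polynomial corrects the residual t. A per-lane scalar sketch, assuming the same constants and table as the generated code above (not part of the patch):

#include <stdint.h>
#include <string.h>

extern const int32_t xnn_table_exp2minus_k_over_16[16];

// Scalar sketch of one lane of the rr2-lut16-p3 reduction (assumes the same
// constants and table as the kernels above); returns alpha*(exp(z) - 1).
static float elu_negative_lut16_p3(float z, float alpha) {
  float n = z * 0x1.715476p+0f + 0x1.800000p19f;        // fixed-point round(z*16/ln2) in the low bits
  uint32_t nbits;
  memcpy(&nbits, &n, sizeof nbits);                     // reinterpret the float as bits
  const uint32_t idx = nbits & 0xF;                     // low 4 bits select 2^(-k/16) from the table
  const uint32_t sbits = (uint32_t) xnn_table_exp2minus_k_over_16[idx] + (nbits << 19);  // integer part -> exponent field
  n -= 0x1.800000p19f;                                  // n = round(z*16/ln2) / 16
  float s;
  memcpy(&s, &sbits, sizeof s);                         // s = 2**n
  float t = z + n * -0x1.62E400p-1f;                    // t = z - n*ln2, split into a high part...
  t += n * -0x1.7F7D1Cp-20f;                            // ...and a low part for extra precision
  float p = (0x1.0001ECp-1f + 0x1.55561Cp-3f * t) * t;  // p = (c2 + c3*t)*t
  p = t * s + p * (t * s);                              // p = t*s*(1 + p) ~= s*exp(t) - s
  return alpha * (p + (s - 1.0f));                      // alpha*(s*exp(t) - 1) = alpha*(exp(z) - 1)
}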
diff --git a/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x20.c b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x20.c
new file mode 100644
index 0000000..8d9192a
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x20.c
@@ -0,0 +1,273 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
+    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
+    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
+    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
+    const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
+    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
+    const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
+    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
+    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
+    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
+    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
+    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
+    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
+    int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
+    vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
+    const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
+    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
+    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
+    int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
+    int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
+    vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
+    vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
+    const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
+    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
+    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
+    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
+    float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
+    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
+    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
+    float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
+    float32x4_t vpCDEF = vmlaq_f32(vc2, vc3, vtCDEF);
+    float32x4_t vpGHIJ = vmlaq_f32(vc2, vc3, vtGHIJ);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
+    vsGHIJ = vsubq_f32(vsGHIJ, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
+    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+    vst1q_f32(y, vyGHIJ); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x24.c b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x24.c
new file mode 100644
index 0000000..a2d05e1
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x24.c
@@ -0,0 +1,299 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
+    float32x4_t vxKLMN = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
+    const float32x4_t vzKLMN = vmaxq_f32(vmulq_f32(vxKLMN, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
+    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);
+    float32x4_t vnKLMN = vmlaq_f32(vmagic_bias, vzKLMN, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
+    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
+    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
+    const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
+    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
+    const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);
+    const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask), 2));
+    const int32x4_t venKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
+    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
+    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
+    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
+    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
+    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
+    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
+    int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
+    vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
+    const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
+    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
+    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
+    int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
+    int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
+    vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
+    vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
+    const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);
+    const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
+    const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
+    int32x2_t vlKL = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxKL));
+    int32x2_t vlMN = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxMN));
+    vlKL = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxKL >> 32)), vlKL, 1);
+    vlMN = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxMN >> 32)), vlMN, 1);
+    const int32x4_t vlKLMN = vcombine_s32(vlKL, vlMN);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
+    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
+    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));
+    vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
+    float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vlKLMN, venKLMN));
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
+    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
+    float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);
+    float32x4_t vtKLMN = vmlaq_f32(vzKLMN, vnKLMN, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
+    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
+    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
+    vtKLMN = vmlaq_f32(vtKLMN, vnKLMN, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
+    float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
+    float32x4_t vpCDEF = vmlaq_f32(vc2, vc3, vtCDEF);
+    float32x4_t vpGHIJ = vmlaq_f32(vc2, vc3, vtGHIJ);
+    float32x4_t vpKLMN = vmlaq_f32(vc2, vc3, vtKLMN);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
+    vpKLMN = vmulq_f32(vpKLMN, vtKLMN);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
+    vsGHIJ = vsubq_f32(vsGHIJ, vone);
+    vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
+    vsKLMN = vsubq_f32(vsKLMN, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
+    vpKLMN = vmlaq_f32(vtKLMN, vpKLMN, vtKLMN);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
+    const float32x4_t veKLMN = vmulq_f32(vaddq_f32(vpKLMN, vsKLMN), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
+    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
+    const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
+    vxKLMN = vmulq_f32(vxKLMN, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
+    const float32x4_t vyKLMN = vbslq_f32(vmKLMN, veKLMN, vxKLMN);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+    vst1q_f32(y, vyGHIJ); y += 4;
+    vst1q_f32(y, vyKLMN); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x4.c b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x4.c
new file mode 100644
index 0000000..a523733
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x4.c
@@ -0,0 +1,126 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
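Note: all of these kernels share the same remainder path. When 1-3 floats are left (n is a byte count that is a multiple of sizeof(float)), a full 4-lane result vy is still computed and only the valid lanes are stored; the bit tests on n decode how many elements remain. The same store logic as above, with explanatory comments (illustrative copy, not part of the patch):

// Shared tail store: at this point 0 < n < 16 and n is a multiple of sizeof(float).
float32x2_t vy_lo = vget_low_f32(vy);
if (n & (2 * sizeof(float))) {      // 2 or 3 elements remain: store the low pair first
  vst1_f32(y, vy_lo); y += 2;
  vy_lo = vget_high_f32(vy);        // a possible 3rd element now sits in lane 0 of the high half
}
if (n & (1 * sizeof(float))) {      // exactly 1 element (or the 3rd of 3) remains: store one lane
  vst1_lane_f32(y, vy_lo, 0);
}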
diff --git a/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x8.c b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x8.c
new file mode 100644
index 0000000..cb76a8b
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-lut16-p3-x8.c
@@ -0,0 +1,195 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-p6-x12.c b/src/f32-velu/gen/velu-neon-rr2-p6-x12.c
new file mode 100644
index 0000000..7ea7015
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-p6-x12.c
@@ -0,0 +1,188 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neon_rr2_p6_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
+    float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
+
+    vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
+
+    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
+
+    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-p6-x16.c b/src/f32-velu/gen/velu-neon-rr2-p6-x16.c
new file mode 100644
index 0000000..a0eac35
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-p6-x16.c
@@ -0,0 +1,208 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neon_rr2_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
+    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
+    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
+    float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
+    float32x4_t vpCDEF = vmlaq_f32(vc5, vc6, vtCDEF);
+
+    vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc4, vpCDEF, vtCDEF);
+
+    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
+
+    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-p6-x20.c b/src/f32-velu/gen/velu-neon-rr2-p6-x20.c
new file mode 100644
index 0000000..aa9bc26
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-p6-x20.c
@@ -0,0 +1,228 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neon_rr2_p6_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
+    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
+    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
+    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
+    float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
+    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
+    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
+    float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
+    float32x4_t vpCDEF = vmlaq_f32(vc5, vc6, vtCDEF);
+    float32x4_t vpGHIJ = vmlaq_f32(vc5, vc6, vtGHIJ);
+
+    vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc4, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vc4, vpGHIJ, vtGHIJ);
+
+    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);
+
+    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
+    vsGHIJ = vsubq_f32(vsGHIJ, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
+    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+    vst1q_f32(y, vyGHIJ); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-p6-x24.c b/src/f32-velu/gen/velu-neon-rr2-p6-x24.c
new file mode 100644
index 0000000..fda5fbc
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-p6-x24.c
@@ -0,0 +1,248 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neon_rr2_p6_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
+    float32x4_t vxKLMN = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
+    const float32x4_t vzKLMN = vmaxq_f32(vmulq_f32(vxKLMN, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
+    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);
+    float32x4_t vnKLMN = vmlaq_f32(vmagic_bias, vzKLMN, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
+    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
+    float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
+    vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
+    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
+    float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);
+    float32x4_t vtKLMN = vmlaq_f32(vzKLMN, vnKLMN, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
+    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
+    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
+    vtKLMN = vmlaq_f32(vtKLMN, vnKLMN, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
+    float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
+    float32x4_t vpCDEF = vmlaq_f32(vc5, vc6, vtCDEF);
+    float32x4_t vpGHIJ = vmlaq_f32(vc5, vc6, vtGHIJ);
+    float32x4_t vpKLMN = vmlaq_f32(vc5, vc6, vtKLMN);
+
+    vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc4, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vc4, vpGHIJ, vtGHIJ);
+    vpKLMN = vmlaq_f32(vc4, vpKLMN, vtKLMN);
+
+    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);
+    vpKLMN = vmlaq_f32(vc3, vpKLMN, vtKLMN);
+
+    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);
+    vpKLMN = vmlaq_f32(vc2, vpKLMN, vtKLMN);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
+    vpKLMN = vmulq_f32(vpKLMN, vtKLMN);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
+    vsGHIJ = vsubq_f32(vsGHIJ, vone);
+    vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
+    vsKLMN = vsubq_f32(vsKLMN, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
+    vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
+    vpKLMN = vmlaq_f32(vtKLMN, vpKLMN, vtKLMN);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
+    const float32x4_t veKLMN = vmulq_f32(vaddq_f32(vpKLMN, vsKLMN), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
+    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
+    const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
+    vxKLMN = vmulq_f32(vxKLMN, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
+    const float32x4_t vyKLMN = vbslq_f32(vmKLMN, veKLMN, vxKLMN);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+    vst1q_f32(y, vyGHIJ); y += 4;
+    vst1q_f32(y, vyKLMN); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-p6-x4.c b/src/f32-velu/gen/velu-neon-rr2-p6-x4.c
new file mode 100644
index 0000000..bd19860
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-p6-x4.c
@@ -0,0 +1,110 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neon_rr2_p6_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neon-rr2-p6-x8.c b/src/f32-velu/gen/velu-neon-rr2-p6-x8.c
new file mode 100644
index 0000000..b10b8de
--- /dev/null
+++ b/src/f32-velu/gen/velu-neon-rr2-p6-x8.c
@@ -0,0 +1,168 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neon_rr2_p6_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
+  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+
+    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
+    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
+
+    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
+    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
+
+    float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
+
+    vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
+
+    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
+
+    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+
+    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
+    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
+    vp = vmlaq_f32(vc4, vp, vt);
+    vp = vmlaq_f32(vc3, vp, vt);
+    vp = vmlaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vmlaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x12.c b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x12.c
new file mode 100644
index 0000000..f454a3d
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x12.c
@@ -0,0 +1,214 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
+    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
+    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
+    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
+    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
+    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
+    float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x16.c b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x16.c
new file mode 100644
index 0000000..9e1acc8
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x16.c
@@ -0,0 +1,239 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
+    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
+    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
+    const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
+    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
+    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
+    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
+    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
+    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
+    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
+    int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
+    vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
+    const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
+    float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
+    float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
+    float32x4_t vpCDEF = vfmaq_f32(vc2, vc3, vtCDEF);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x20.c b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x20.c
new file mode 100644
index 0000000..1ea2340
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x20.c
@@ -0,0 +1,264 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
+    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
+    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
+    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
+    const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
+    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
+    const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
+    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
+    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
+    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
+    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
+    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
+    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
+    int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
+    vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
+    const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
+    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
+    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
+    int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
+    int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
+    vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
+    vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
+    const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
+    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
+    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
+    float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
+    float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
+    float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
+    float32x4_t vpCDEF = vfmaq_f32(vc2, vc3, vtCDEF);
+    float32x4_t vpGHIJ = vfmaq_f32(vc2, vc3, vtGHIJ);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
+    vsGHIJ = vsubq_f32(vsGHIJ, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
+    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+    vst1q_f32(y, vyGHIJ); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x24.c b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x24.c
new file mode 100644
index 0000000..7f794bf
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x24.c
@@ -0,0 +1,289 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
+    float32x4_t vxKLMN = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
+    const float32x4_t vzKLMN = vmaxq_f32(vmulq_f32(vxKLMN, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
+    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vlog2e);
+    float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
+    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
+    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
+    const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
+    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
+    const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);
+    const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask), 2));
+    const int32x4_t venKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
+    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
+    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
+    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
+    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
+    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
+    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
+    int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
+    vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
+    const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
+    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
+    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
+    int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
+    int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
+    vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
+    vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
+    const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);
+    const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
+    const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
+    int32x2_t vlKL = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxKL));
+    int32x2_t vlMN = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxMN));
+    vlKL = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxKL >> 32)), vlKL, 1);
+    vlMN = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxMN >> 32)), vlMN, 1);
+    const int32x4_t vlKLMN = vcombine_s32(vlKL, vlMN);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
+    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
+    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));
+    vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
+    float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vlKLMN, venKLMN));
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
+    float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
+    float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vminus_ln2);
+    float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
+    float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
+    float32x4_t vpCDEF = vfmaq_f32(vc2, vc3, vtCDEF);
+    float32x4_t vpGHIJ = vfmaq_f32(vc2, vc3, vtGHIJ);
+    float32x4_t vpKLMN = vfmaq_f32(vc2, vc3, vtKLMN);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
+    vpKLMN = vmulq_f32(vpKLMN, vtKLMN);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
+    vsGHIJ = vsubq_f32(vsGHIJ, vone);
+    vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
+    vsKLMN = vsubq_f32(vsKLMN, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
+    vpKLMN = vfmaq_f32(vtKLMN, vpKLMN, vtKLMN);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
+    const float32x4_t veKLMN = vmulq_f32(vaddq_f32(vpKLMN, vsKLMN), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
+    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
+    const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
+    vxKLMN = vmulq_f32(vxKLMN, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
+    const float32x4_t vyKLMN = vbslq_f32(vmKLMN, veKLMN, vxKLMN);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+    vst1q_f32(y, vyGHIJ); y += 4;
+    vst1q_f32(y, vyKLMN); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x4.c b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x4.c
new file mode 100644
index 0000000..8e74215
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x4.c
@@ -0,0 +1,123 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x8.c b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x8.c
new file mode 100644
index 0000000..1cce576
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-lut16-p3-x8.c
@@ -0,0 +1,189 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+
+    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
+    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
+    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
+    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
+
+    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
+    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
+    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
+    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
+    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
+    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
+    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
+    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
+    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
+    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
+
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
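+    // Store the 1-3 remaining elements piecewise: two lanes first, then one.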
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-p6-x12.c b/src/f32-velu/gen/velu-neonfma-rr1-p6-x12.c
new file mode 100644
index 0000000..7dbbfac
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-p6-x12.c
@@ -0,0 +1,181 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neonfma_rr1_p6_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
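+  // ELU: y = alpha * (exp(prescale * x) - 1) for x < 0, y = beta * x otherwise.
+  // exp(z) - 1 is reconstructed as s * expm1(t) + (s - 1) with s = 2**n, where n is
+  // extracted from z * log2(e) via the magic-bias rounding trick, t = z - n * ln2 is
+  // the reduced argument, and expm1(t) is approximated by a degree-6 polynomial.
+  // z is clamped at vsat_cutoff so the reconstruction of s cannot underflow for
+  // very negative inputs.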
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
+    float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
+
+    vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
+
+    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
+
+    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-p6-x16.c b/src/f32-velu/gen/velu-neonfma-rr1-p6-x16.c
new file mode 100644
index 0000000..e019b9f
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-p6-x16.c
@@ -0,0 +1,200 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neonfma_rr1_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
+    float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
+    float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
+    float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
+
+    vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
+
+    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
+
+    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-p6-x20.c b/src/f32-velu/gen/velu-neonfma-rr1-p6-x20.c
new file mode 100644
index 0000000..65d11dc
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-p6-x20.c
@@ -0,0 +1,219 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neonfma_rr1_p6_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
+    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
+    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
+    float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
+    float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
+    float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
+    float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
+    float32x4_t vpGHIJ = vfmaq_f32(vc5, vc6, vtGHIJ);
+
+    vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vc4, vpGHIJ, vtGHIJ);
+
+    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
+
+    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
+    vsGHIJ = vsubq_f32(vsGHIJ, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
+    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+    vst1q_f32(y, vyGHIJ); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-p6-x24.c b/src/f32-velu/gen/velu-neonfma-rr1-p6-x24.c
new file mode 100644
index 0000000..c6bb0fa
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-p6-x24.c
@@ -0,0 +1,238 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neonfma_rr1_p6_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+    float32x4_t vx89AB = vld1q_f32(x); x += 4;
+    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
+    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
+    float32x4_t vxKLMN = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
+    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
+    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
+    const float32x4_t vzKLMN = vmaxq_f32(vmulq_f32(vxKLMN, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
+    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
+    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vlog2e);
+    float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
+    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
+    float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
+    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
+    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
+    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
+    float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
+    vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
+    float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
+    float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vminus_ln2);
+    float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
+    float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
+    float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
+    float32x4_t vpGHIJ = vfmaq_f32(vc5, vc6, vtGHIJ);
+    float32x4_t vpKLMN = vfmaq_f32(vc5, vc6, vtKLMN);
+
+    vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vc4, vpGHIJ, vtGHIJ);
+    vpKLMN = vfmaq_f32(vc4, vpKLMN, vtKLMN);
+
+    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
+    vpKLMN = vfmaq_f32(vc3, vpKLMN, vtKLMN);
+
+    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
+    vpKLMN = vfmaq_f32(vc2, vpKLMN, vtKLMN);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+    vp89AB = vmulq_f32(vp89AB, vt89AB);
+    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
+    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
+    vpKLMN = vmulq_f32(vpKLMN, vtKLMN);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+    vt89AB = vmulq_f32(vt89AB, vs89AB);
+    vs89AB = vsubq_f32(vs89AB, vone);
+    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
+    vsCDEF = vsubq_f32(vsCDEF, vone);
+    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
+    vsGHIJ = vsubq_f32(vsGHIJ, vone);
+    vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
+    vsKLMN = vsubq_f32(vsKLMN, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
+    vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
+    vpGHIJ = vfmaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
+    vpKLMN = vfmaq_f32(vtKLMN, vpKLMN, vtKLMN);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
+    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
+    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
+    const float32x4_t veKLMN = vmulq_f32(vaddq_f32(vpKLMN, vsKLMN), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
+    vx89AB = vmulq_f32(vx89AB, vbeta);
+    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
+    vxCDEF = vmulq_f32(vxCDEF, vbeta);
+    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
+    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
+    const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
+    vxKLMN = vmulq_f32(vxKLMN, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
+    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
+    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
+    const float32x4_t vyKLMN = vbslq_f32(vmKLMN, veKLMN, vxKLMN);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+    vst1q_f32(y, vy89AB); y += 4;
+    vst1q_f32(y, vyCDEF); y += 4;
+    vst1q_f32(y, vyGHIJ); y += 4;
+    vst1q_f32(y, vyKLMN); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-p6-x4.c b/src/f32-velu/gen/velu-neonfma-rr1-p6-x4.c
new file mode 100644
index 0000000..d9c028f
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-p6-x4.c
@@ -0,0 +1,107 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neonfma_rr1_p6_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-neonfma-rr1-p6-x8.c b/src/f32-velu/gen/velu-neonfma-rr1-p6-x8.c
new file mode 100644
index 0000000..825c461
--- /dev/null
+++ b/src/f32-velu/gen/velu-neonfma-rr1-p6-x8.c
@@ -0,0 +1,162 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/neon-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__neonfma_rr1_p6_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    float32x4_t vx0123 = vld1q_f32(x); x += 4;
+    float32x4_t vx4567 = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
+    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
+
+    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
+    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
+
+    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
+    vn0123 = vsubq_f32(vn0123, vmagic_bias);
+    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
+    vn4567 = vsubq_f32(vn4567, vmagic_bias);
+
+    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
+    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
+
+    float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
+    float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
+
+    vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
+
+    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
+
+    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
+
+    vp0123 = vmulq_f32(vp0123, vt0123);
+    vp4567 = vmulq_f32(vp4567, vt4567);
+
+    vt0123 = vmulq_f32(vt0123, vs0123);
+    vs0123 = vsubq_f32(vs0123, vone);
+    vt4567 = vmulq_f32(vt4567, vs4567);
+    vs4567 = vsubq_f32(vs4567, vone);
+
+    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
+    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
+
+    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
+    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
+
+    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
+    vx0123 = vmulq_f32(vx0123, vbeta);
+    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
+    vx4567 = vmulq_f32(vx4567, vbeta);
+
+    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
+    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
+
+    vst1q_f32(y, vy0123); y += 4;
+    vst1q_f32(y, vy4567); y += 4;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
+
+    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
+    vp = vfmaq_f32(vc4, vp, vt);
+    vp = vfmaq_f32(vc3, vp, vt);
+    vp = vfmaq_f32(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = vfmaq_f32(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x1.c b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x1.c
new file mode 100644
index 0000000..124a6a5
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x1.c
@@ -0,0 +1,79 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
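+// Bit patterns of the 16 floats 2**(-k/16), k = 0..15, shared by the LUT16 kernels.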
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
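+  // LUT16 variant: the low 4 bits of the bit pattern of the magic-biased n index the
+  // 16-entry exp2 table, and the remaining bits are shifted into the exponent field
+  // (<< 19); adding the two bit patterns reconstructs s as exp2 of the rounded
+  // z * log2(e). The residual t uses a two-term ln2 (the "rr2" range reduction) and
+  // a degree-3 polynomial. For z <= vsat_cutoff both s and t are zeroed, so the
+  // negative branch evaluates to exactly -alpha.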
+  do {
+    float vx = *x++;
+
+    const float vz = vx * vprescale;
+
+    float vn = vz * vlog2e + vmagic_bias;
+    const uint32_t ven = fp32_to_bits(vn) << 19;
+    const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+    vn -= vmagic_bias;
+
+    float vt = vn * vminus_ln2_hi + vz;
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+    vt = vn * vminus_ln2_lo + vt;
+    if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
+    float vp = vc3 * vt + vc2;
+    vp *= vt;
+
+    vt *= vs;
+    vs -= vone;
+    vp = vp * vt + vt;
+    const float ve = (vp + vs) * valpha;
+
+    float vy = vx * vbeta;
+    if XNN_UNPREDICTABLE(vx < 0.0f) {
+      vy = ve;
+    }
+
+    *y++ = vy;
+
+    n -= sizeof(float);
+  } while (n != 0);
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x2.c b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x2.c
new file mode 100644
index 0000000..a6636bf
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x2.c
@@ -0,0 +1,141 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    x += 2;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float vx = *x;
+
+    const float vz = vx * vprescale;
+
+    float vn = vz * vlog2e + vmagic_bias;
+    const uint32_t ven = fp32_to_bits(vn) << 19;
+    const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+    vn -= vmagic_bias;
+
+    float vt = vn * vminus_ln2_hi + vz;
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+    vt = vn * vminus_ln2_lo + vt;
+    if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
+    float vp = vc3 * vt + vc2;
+    vp *= vt;
+
+    vt *= vs;
+    vs -= vone;
+    vp = vp * vt + vt;
+    const float ve = (vp + vs) * valpha;
+
+    float vy = vx * vbeta;
+    if XNN_UNPREDICTABLE(vx < 0.0f) {
+      vy = ve;
+    }
+
+    *y = vy;
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x3.c b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x3.c
new file mode 100644
index 0000000..c33c3a9
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x3.c
@@ -0,0 +1,169 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 3 * sizeof(float); n -= 3 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    x += 3;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+    const float vz2 = vx2 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
+    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
+    vn2 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
+      vs2 = 0.0f;
+      vt2 = 0.0f;
+    }
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+    float vp2 = vc3 * vt2 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = vx2 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
+      vy2 = ve2;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y += 3;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+        vs = 0.0f;
+        vt = 0.0f;
+      }
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = vx * vbeta;
+      if XNN_UNPREDICTABLE(vx < 0.0f) {
+        vy = ve;
+      }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x4.c b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x4.c
new file mode 100644
index 0000000..bb4cfde
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x4.c
@@ -0,0 +1,193 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    x += 4;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+    const float vz2 = vx2 * vprescale;
+    const float vz3 = vx3 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
+    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
+    vn2 -= vmagic_bias;
+    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
+    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
+    vn3 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
+      vs2 = 0.0f;
+      vt2 = 0.0f;
+    }
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
+      vs3 = 0.0f;
+      vt3 = 0.0f;
+    }
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+    float vp2 = vc3 * vt2 + vc2;
+    float vp3 = vc3 * vt3 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = vx2 * vbeta;
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = vx3 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
+      vy2 = ve2;
+    }
+    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
+      vy3 = ve3;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+        vs = 0.0f;
+        vt = 0.0f;
+      }
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = vx * vbeta;
+      if XNN_UNPREDICTABLE(vx < 0.0f) {
+        vy = ve;
+      }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x5.c b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x5.c
new file mode 100644
index 0000000..8357e43
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x5.c
@@ -0,0 +1,217 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 5 * sizeof(float); n -= 5 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    float vx4 = x[4];
+    x += 5;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+    const float vz2 = vx2 * vprescale;
+    const float vz3 = vx3 * vprescale;
+    const float vz4 = vx4 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+    float vn4 = vz4 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
+    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
+    vn2 -= vmagic_bias;
+    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
+    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
+    vn3 -= vmagic_bias;
+    const uint32_t ven4 = fp32_to_bits(vn4) << 19;
+    const uint32_t vidx4 = fp32_to_bits(vn4) & vindex_mask;
+    vn4 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
+    float vt4 = vn4 * vminus_ln2_hi + vz4;
+    float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
+      vs2 = 0.0f;
+      vt2 = 0.0f;
+    }
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
+      vs3 = 0.0f;
+      vt3 = 0.0f;
+    }
+    vt4 = vn4 * vminus_ln2_lo + vt4;
+    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
+      vs4 = 0.0f;
+      vt4 = 0.0f;
+    }
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+    float vp2 = vc3 * vt2 + vc2;
+    float vp3 = vc3 * vt3 + vc2;
+    float vp4 = vc3 * vt4 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+    vp4 *= vt4;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+    vt4 *= vs4;
+    vs4 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+    vp4 = vp4 * vt4 + vt4;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = vx2 * vbeta;
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = vx3 * vbeta;
+    const float ve4 = (vp4 + vs4) * valpha;
+    float vy4 = vx4 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
+      vy2 = ve2;
+    }
+    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
+      vy3 = ve3;
+    }
+    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
+      vy4 = ve4;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y[4] = vy4;
+    y += 5;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+        vs = 0.0f;
+        vt = 0.0f;
+      }
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = vx * vbeta;
+      if XNN_UNPREDICTABLE(vx < 0.0f) {
+        vy = ve;
+      }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x6.c b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x6.c
new file mode 100644
index 0000000..c294782
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-lut16-p3-x6.c
@@ -0,0 +1,241 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    float vx4 = x[4];
+    float vx5 = x[5];
+    x += 6;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+    const float vz2 = vx2 * vprescale;
+    const float vz3 = vx3 * vprescale;
+    const float vz4 = vx4 * vprescale;
+    const float vz5 = vx5 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+    float vn4 = vz4 * vlog2e + vmagic_bias;
+    float vn5 = vz5 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
+    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
+    vn2 -= vmagic_bias;
+    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
+    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
+    vn3 -= vmagic_bias;
+    const uint32_t ven4 = fp32_to_bits(vn4) << 19;
+    const uint32_t vidx4 = fp32_to_bits(vn4) & vindex_mask;
+    vn4 -= vmagic_bias;
+    const uint32_t ven5 = fp32_to_bits(vn5) << 19;
+    const uint32_t vidx5 = fp32_to_bits(vn5) & vindex_mask;
+    vn5 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
+    float vt4 = vn4 * vminus_ln2_hi + vz4;
+    float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
+    float vt5 = vn5 * vminus_ln2_hi + vz5;
+    float vs5 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx5] + ven5);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
+      vs2 = 0.0f;
+      vt2 = 0.0f;
+    }
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
+      vs3 = 0.0f;
+      vt3 = 0.0f;
+    }
+    vt4 = vn4 * vminus_ln2_lo + vt4;
+    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
+      vs4 = 0.0f;
+      vt4 = 0.0f;
+    }
+    vt5 = vn5 * vminus_ln2_lo + vt5;
+    if XNN_UNPREDICTABLE(vz5 <= vsat_cutoff) {
+      vs5 = 0.0f;
+      vt5 = 0.0f;
+    }
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+    float vp2 = vc3 * vt2 + vc2;
+    float vp3 = vc3 * vt3 + vc2;
+    float vp4 = vc3 * vt4 + vc2;
+    float vp5 = vc3 * vt5 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+    vp4 *= vt4;
+    vp5 *= vt5;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+    vt4 *= vs4;
+    vs4 -= vone;
+    vt5 *= vs5;
+    vs5 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+    vp4 = vp4 * vt4 + vt4;
+    vp5 = vp5 * vt5 + vt5;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = vx2 * vbeta;
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = vx3 * vbeta;
+    const float ve4 = (vp4 + vs4) * valpha;
+    float vy4 = vx4 * vbeta;
+    const float ve5 = (vp5 + vs5) * valpha;
+    float vy5 = vx5 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
+      vy2 = ve2;
+    }
+    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
+      vy3 = ve3;
+    }
+    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
+      vy4 = ve4;
+    }
+    if XNN_UNPREDICTABLE(vx5 < 0.0f) {
+      vy5 = ve5;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y[4] = vy4;
+    y[5] = vy5;
+    y += 6;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+        vs = 0.0f;
+        vt = 0.0f;
+      }
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = vx * vbeta;
+      if XNN_UNPREDICTABLE(vx < 0.0f) {
+        vy = ve;
+      }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-p6-x1.c b/src/f32-velu/gen/velu-scalar-rr2-p6-x1.c
new file mode 100644
index 0000000..7a086e0
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-p6-x1.c
@@ -0,0 +1,80 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__scalar_rr2_p6_x1(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  do {
+    float vx = *x++;
+
+    const float vz = vx * vprescale;
+
+    float vn = vz * vlog2e + vmagic_bias;
+    float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+    vn -= vmagic_bias;
+
+    float vt = vn * vminus_ln2_hi + vz;
+    vt = vn * vminus_ln2_lo + vt;
+
+    if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
+    float vp = vc6 * vt + vc5;
+    vp = vp * vt + vc4;
+    vp = vp * vt + vc3;
+    vp = vp * vt + vc2;
+    vp *= vt;
+
+    vt *= vs;
+    vs -= vone;
+    vp = vp * vt + vt;
+    const float ve = (vp + vs) * valpha;
+
+    float vy = vx * vbeta;
+    if XNN_UNPREDICTABLE(vx < 0.0f) {
+      vy = ve;
+    }
+
+    *y++ = vy;
+
+    n -= sizeof(float);
+  } while (n != 0);
+}
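
Editorial sketch (not part of the generated diff): the rr2-p6 kernel above reconstructs 2**n without a lookup table. Adding the magic bias 0x1.8000FEp23f rounds vz*vlog2e to the nearest integer n and leaves n + 127 in the low mantissa bits, so shifting the bit pattern left by 23 drops everything else and lands n + 127 directly in the exponent field. A standalone illustration of that step under the same constants; the fp32 bit-cast helpers are reimplemented with memcpy here instead of relying on <fp16/bitcasts.h>:

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t bits_of(float f) { uint32_t u; memcpy(&u, &f, sizeof u); return u; }
static float float_of(uint32_t u) { float f; memcpy(&f, &u, sizeof f); return f; }

int main(void) {
  const float vmagic_bias = 0x1.8000FEp23f;
  const float vlog2e = 0x1.715476p+0f;
  // Sample a few pre-scaled inputs inside the kernel's working range (above vsat_cutoff).
  for (float vz = -17.0f; vz < 0.0f; vz += 1.7f) {
    float vn = vz * vlog2e + vmagic_bias;          // rounds vz*log2(e) to the nearest integer
    const float vs = float_of(bits_of(vn) << 23);  // 2**round(vz*log2(e)) via the exponent field
    vn -= vmagic_bias;                             // recover the rounded integer n
    printf("vz=%+8.4f  vs=%.8e  exp2f(vn)=%.8e\n", vz, vs, exp2f(vn));
  }
  return 0;
}

The two printed columns agree exactly: vs is the 2**n term that the polynomial in vt then refines into expm1(vz).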
diff --git a/src/f32-velu/gen/velu-scalar-rr2-p6-x2.c b/src/f32-velu/gen/velu-scalar-rr2-p6-x2.c
new file mode 100644
index 0000000..14c3dd8
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-p6-x2.c
@@ -0,0 +1,148 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__scalar_rr2_p6_x2(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    x += 2;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float vx = *x;
+
+    const float vz = vx * vprescale;
+
+    float vn = vz * vlog2e + vmagic_bias;
+    float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+    vn -= vmagic_bias;
+
+    float vt = vn * vminus_ln2_hi + vz;
+    vt = vn * vminus_ln2_lo + vt;
+
+    if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
+    float vp = vc6 * vt + vc5;
+    vp = vp * vt + vc4;
+    vp = vp * vt + vc3;
+    vp = vp * vt + vc2;
+    vp *= vt;
+
+    vt *= vs;
+    vs -= vone;
+    vp = vp * vt + vt;
+    const float ve = (vp + vs) * valpha;
+
+    float vy = vx * vbeta;
+    if XNN_UNPREDICTABLE(vx < 0.0f) {
+      vy = ve;
+    }
+
+    *y = vy;
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-p6-x3.c b/src/f32-velu/gen/velu-scalar-rr2-p6-x3.c
new file mode 100644
index 0000000..9007c96
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-p6-x3.c
@@ -0,0 +1,177 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__scalar_rr2_p6_x3(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 3 * sizeof(float); n -= 3 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    x += 3;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+    const float vz2 = vx2 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
+    vn2 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
+      vs2 = 0.0f;
+      vt2 = 0.0f;
+    }
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+    float vp2 = vc6 * vt2 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+    vp2 = vp2 * vt2 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+    vp2 = vp2 * vt2 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+    vp2 = vp2 * vt2 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = vx2 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
+      vy2 = ve2;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y += 3;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+        vs = 0.0f;
+        vt = 0.0f;
+      }
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = vx * vbeta;
+      if XNN_UNPREDICTABLE(vx < 0.0f) {
+        vy = ve;
+      }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-p6-x4.c b/src/f32-velu/gen/velu-scalar-rr2-p6-x4.c
new file mode 100644
index 0000000..7a8da59
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-p6-x4.c
@@ -0,0 +1,202 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__scalar_rr2_p6_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    x += 4;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+    const float vz2 = vx2 * vprescale;
+    const float vz3 = vx3 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
+    vn2 -= vmagic_bias;
+    float vs3 = fp32_from_bits(fp32_to_bits(vn3) << 23);
+    vn3 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
+      vs2 = 0.0f;
+      vt2 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
+      vs3 = 0.0f;
+      vt3 = 0.0f;
+    }
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+    float vp2 = vc6 * vt2 + vc5;
+    float vp3 = vc6 * vt3 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+    vp2 = vp2 * vt2 + vc4;
+    vp3 = vp3 * vt3 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+    vp2 = vp2 * vt2 + vc3;
+    vp3 = vp3 * vt3 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+    vp2 = vp2 * vt2 + vc2;
+    vp3 = vp3 * vt3 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = vx2 * vbeta;
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = vx3 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
+      vy2 = ve2;
+    }
+    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
+      vy3 = ve3;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+        vs = 0.0f;
+        vt = 0.0f;
+      }
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = vx * vbeta;
+      if XNN_UNPREDICTABLE(vx < 0.0f) {
+        vy = ve;
+      }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-p6-x5.c b/src/f32-velu/gen/velu-scalar-rr2-p6-x5.c
new file mode 100644
index 0000000..c837e8c
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-p6-x5.c
@@ -0,0 +1,227 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__scalar_rr2_p6_x5(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 5 * sizeof(float); n -= 5 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    float vx4 = x[4];
+    x += 5;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+    const float vz2 = vx2 * vprescale;
+    const float vz3 = vx3 * vprescale;
+    const float vz4 = vx4 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+    float vn4 = vz4 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
+    vn2 -= vmagic_bias;
+    float vs3 = fp32_from_bits(fp32_to_bits(vn3) << 23);
+    vn3 -= vmagic_bias;
+    float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);
+    vn4 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vt4 = vn4 * vminus_ln2_hi + vz4;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    vt4 = vn4 * vminus_ln2_lo + vt4;
+
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
+      vs2 = 0.0f;
+      vt2 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
+      vs3 = 0.0f;
+      vt3 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
+      vs4 = 0.0f;
+      vt4 = 0.0f;
+    }
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+    float vp2 = vc6 * vt2 + vc5;
+    float vp3 = vc6 * vt3 + vc5;
+    float vp4 = vc6 * vt4 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+    vp2 = vp2 * vt2 + vc4;
+    vp3 = vp3 * vt3 + vc4;
+    vp4 = vp4 * vt4 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+    vp2 = vp2 * vt2 + vc3;
+    vp3 = vp3 * vt3 + vc3;
+    vp4 = vp4 * vt4 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+    vp2 = vp2 * vt2 + vc2;
+    vp3 = vp3 * vt3 + vc2;
+    vp4 = vp4 * vt4 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+    vp4 *= vt4;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+    vt4 *= vs4;
+    vs4 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+    vp4 = vp4 * vt4 + vt4;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = vx2 * vbeta;
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = vx3 * vbeta;
+    const float ve4 = (vp4 + vs4) * valpha;
+    float vy4 = vx4 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
+      vy2 = ve2;
+    }
+    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
+      vy3 = ve3;
+    }
+    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
+      vy4 = ve4;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y[4] = vy4;
+    y += 5;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+        vs = 0.0f;
+        vt = 0.0f;
+      }
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = vx * vbeta;
+      if XNN_UNPREDICTABLE(vx < 0.0f) {
+        vy = ve;
+      }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-scalar-rr2-p6-x6.c b/src/f32-velu/gen/velu-scalar-rr2-p6-x6.c
new file mode 100644
index 0000000..d8484a7
--- /dev/null
+++ b/src/f32-velu/gen/velu-scalar-rr2-p6-x6.c
@@ -0,0 +1,252 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__scalar_rr2_p6_x6(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    float vx4 = x[4];
+    float vx5 = x[5];
+    x += 6;
+
+    const float vz0 = vx0 * vprescale;
+    const float vz1 = vx1 * vprescale;
+    const float vz2 = vx2 * vprescale;
+    const float vz3 = vx3 * vprescale;
+    const float vz4 = vx4 * vprescale;
+    const float vz5 = vx5 * vprescale;
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+    float vn4 = vz4 * vlog2e + vmagic_bias;
+    float vn5 = vz5 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
+    vn2 -= vmagic_bias;
+    float vs3 = fp32_from_bits(fp32_to_bits(vn3) << 23);
+    vn3 -= vmagic_bias;
+    float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);
+    vn4 -= vmagic_bias;
+    float vs5 = fp32_from_bits(fp32_to_bits(vn5) << 23);
+    vn5 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vt4 = vn4 * vminus_ln2_hi + vz4;
+    float vt5 = vn5 * vminus_ln2_hi + vz5;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    vt4 = vn4 * vminus_ln2_lo + vt4;
+    vt5 = vn5 * vminus_ln2_lo + vt5;
+
+    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
+      vs0 = 0.0f;
+      vt0 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
+      vs1 = 0.0f;
+      vt1 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
+      vs2 = 0.0f;
+      vt2 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
+      vs3 = 0.0f;
+      vt3 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
+      vs4 = 0.0f;
+      vt4 = 0.0f;
+    }
+    if XNN_UNPREDICTABLE(vz5 <= vsat_cutoff) {
+      vs5 = 0.0f;
+      vt5 = 0.0f;
+    }
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+    float vp2 = vc6 * vt2 + vc5;
+    float vp3 = vc6 * vt3 + vc5;
+    float vp4 = vc6 * vt4 + vc5;
+    float vp5 = vc6 * vt5 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+    vp2 = vp2 * vt2 + vc4;
+    vp3 = vp3 * vt3 + vc4;
+    vp4 = vp4 * vt4 + vc4;
+    vp5 = vp5 * vt5 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+    vp2 = vp2 * vt2 + vc3;
+    vp3 = vp3 * vt3 + vc3;
+    vp4 = vp4 * vt4 + vc3;
+    vp5 = vp5 * vt5 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+    vp2 = vp2 * vt2 + vc2;
+    vp3 = vp3 * vt3 + vc2;
+    vp4 = vp4 * vt4 + vc2;
+    vp5 = vp5 * vt5 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+    vp4 *= vt4;
+    vp5 *= vt5;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+    vt4 *= vs4;
+    vs4 -= vone;
+    vt5 *= vs5;
+    vs5 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+    vp4 = vp4 * vt4 + vt4;
+    vp5 = vp5 * vt5 + vt5;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = vx0 * vbeta;
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = vx1 * vbeta;
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = vx2 * vbeta;
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = vx3 * vbeta;
+    const float ve4 = (vp4 + vs4) * valpha;
+    float vy4 = vx4 * vbeta;
+    const float ve5 = (vp5 + vs5) * valpha;
+    float vy5 = vx5 * vbeta;
+
+    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
+      vy0 = ve0;
+    }
+    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
+      vy1 = ve1;
+    }
+    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
+      vy2 = ve2;
+    }
+    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
+      vy3 = ve3;
+    }
+    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
+      vy4 = ve4;
+    }
+    if XNN_UNPREDICTABLE(vx5 < 0.0f) {
+      vy5 = ve5;
+    }
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y[4] = vy4;
+    y[5] = vy5;
+    y += 6;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+        vs = 0.0f;
+        vt = 0.0f;
+      }
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = vx * vbeta;
+      if XNN_UNPREDICTABLE(vx < 0.0f) {
+        vy = ve;
+      }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
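
Editorial note (not part of the generated diff): the scalar variants above share the cutoff vsat_cutoff = -0x1.154246p+4f, roughly -25*ln(2) (about -17.3287), below which 2**(vz*log2e) falls to around 2**-25 and no longer contributes at float precision. The kernels handle that range by flushing vs and vt to zero, which collapses the reconstruction to ve = (0 + (0 - 1)) * valpha = -valpha, the exact ELU saturation value. An illustrative check of the constant only:

#include <math.h>
#include <stdio.h>

int main(void) {
  const float vsat_cutoff = -0x1.154246p+4f;
  // Roughly -25*ln(2): past this point alpha*expm1(vz) rounds to -alpha in fp32.
  printf("vsat_cutoff = %.6f, -25*ln(2) = %.6f\n", vsat_cutoff, -25.0 * log(2.0));
  return 0;
}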
diff --git a/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x12.c b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x12.c
new file mode 100644
index 0000000..57fa9c3
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x12.c
@@ -0,0 +1,287 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    x += 12;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+    const __m128i vidx89AB = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask), 2);
+    const __m128i ven89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx0123, vidx0123));
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx4567, vidx4567));
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
+      const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx89AB, vidx89AB));
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB)));
+      const __m128i vl9 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32))));
+      const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
+      const __m128i vlB = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32))));
+      const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
+      const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
+      const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
+      const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx8)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxA)));
+      const __m128i vl9 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx9)));
+      const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
+      const __m128i vlB = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxB)));
+      const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ven89AB));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc3, vt89AB), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+    const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    y += 12;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x16.c b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x16.c
new file mode 100644
index 0000000..7f27543
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x16.c
@@ -0,0 +1,326 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    x += 16;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+    const __m128i vidx89AB = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask), 2);
+    const __m128i ven89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 19);
+    const __m128i vidxCDEF = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask), 2);
+    const __m128i venCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx0123, vidx0123));
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx4567, vidx4567));
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
+      const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx89AB, vidx89AB));
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB)));
+      const __m128i vl9 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32))));
+      const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
+      const __m128i vlB = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32))));
+      const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
+      const uint64_t vidxEF = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidxCDEF, vidxCDEF));
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF)));
+      const __m128i vlD = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32))));
+      const __m128i vlCD = _mm_unpacklo_epi32(vlC, vlD);
+      const __m128i vlF = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32))));
+      const __m128i vlEF = _mm_unpacklo_epi32(vlE, vlF);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
+      const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
+      const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
+      const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx8)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxA)));
+      const __m128i vl9 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx9)));
+      const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
+      const __m128i vlB = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxB)));
+      const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
+      const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
+      const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
+      const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxC)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxE)));
+      const __m128i vlD = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxD)));
+      const __m128i vlCD = _mm_unpacklo_epi32(vlC, vlD);
+      const __m128i vlF = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxF)));
+      const __m128i vlEF = _mm_unpacklo_epi32(vlE, vlF);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ven89AB));
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, venCDEF));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc3, vt89AB), vc2);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc3, vtCDEF), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+    const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
+    const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    y += 16;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x20.c b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x20.c
new file mode 100644
index 0000000..7f83f2d
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x20.c
@@ -0,0 +1,365 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
+    x += 20;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+    const __m128i vidx89AB = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask), 2);
+    const __m128i ven89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 19);
+    const __m128i vidxCDEF = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask), 2);
+    const __m128i venCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 19);
+    const __m128i vidxGHIJ = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnGHIJ), vindex_mask), 2);
+    const __m128i venGHIJ = _mm_slli_epi32(_mm_castps_si128(vnGHIJ), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx0123, vidx0123));
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx4567, vidx4567));
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
+      const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx89AB, vidx89AB));
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB)));
+      const __m128i vl9 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32))));
+      const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
+      const __m128i vlB = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32))));
+      const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
+      const uint64_t vidxEF = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidxCDEF, vidxCDEF));
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF)));
+      const __m128i vlD = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32))));
+      const __m128i vlCD = _mm_unpacklo_epi32(vlC, vlD);
+      const __m128i vlF = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32))));
+      const __m128i vlEF = _mm_unpacklo_epi32(vlE, vlF);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+      const uint64_t vidxGH = (uint64_t) _mm_cvtsi128_si64(vidxGHIJ);
+      const uint64_t vidxIJ = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidxGHIJ, vidxGHIJ));
+      const __m128i vlG   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH)));
+      const __m128i vlI = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ)));
+      const __m128i vlH = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32))));
+      const __m128i vlGH = _mm_unpacklo_epi32(vlG, vlH);
+      const __m128i vlJ = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32))));
+      const __m128i vlIJ = _mm_unpacklo_epi32(vlI, vlJ);
+      const __m128i vlGHIJ = _mm_unpacklo_epi64(vlGH, vlIJ);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
+      const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
+      const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
+      const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx8)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxA)));
+      const __m128i vl9 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx9)));
+      const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
+      const __m128i vlB = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxB)));
+      const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
+      const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
+      const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
+      const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxC)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxE)));
+      const __m128i vlD = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxD)));
+      const __m128i vlCD = _mm_unpacklo_epi32(vlC, vlD);
+      const __m128i vlF = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxF)));
+      const __m128i vlEF = _mm_unpacklo_epi32(vlE, vlF);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+      const uint32_t vidxG = (uint32_t) _mm_cvtsi128_si32(vidxGHIJ);
+      const uint32_t vidxH = (uint32_t) _mm_extract_epi16(vidxGHIJ, 2);
+      const uint32_t vidxI = (uint32_t) _mm_extract_epi16(vidxGHIJ, 4);
+      const uint32_t vidxJ = (uint32_t) _mm_extract_epi16(vidxGHIJ, 6);
+      const __m128i vlG   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxG)));
+      const __m128i vlI = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxI)));
+      const __m128i vlH = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxH)));
+      const __m128i vlGH = _mm_unpacklo_epi32(vlG, vlH);
+      const __m128i vlJ = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxJ)));
+      const __m128i vlIJ = _mm_unpacklo_epi32(vlI, vlJ);
+      const __m128i vlGHIJ = _mm_unpacklo_epi64(vlGH, vlIJ);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ven89AB));
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, venCDEF));
+    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
+    __m128 vsGHIJ = _mm_castsi128_ps(_mm_add_epi32(vlGHIJ, venGHIJ));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc3, vt89AB), vc2);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc3, vtCDEF), vc2);
+    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc3, vtGHIJ), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
+    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+    const __m128 vmGHIJ = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxGHIJ)));
+    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+    const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
+    const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
+    const __m128 vyGHIJ = _mm_or_ps(_mm_and_ps(veGHIJ, vmGHIJ), _mm_andnot_ps(vmGHIJ, vxGHIJ));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    _mm_storeu_ps(y + 16, vyGHIJ);
+    y += 20;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x24.c b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x24.c
new file mode 100644
index 0000000..cc3b426
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x24.c
@@ -0,0 +1,404 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
+    __m128 vxKLMN = _mm_loadu_ps(x + 20);
+    x += 24;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
+    const __m128 vzKLMN = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxKLMN, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
+    __m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+    const __m128i vidx89AB = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask), 2);
+    const __m128i ven89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 19);
+    const __m128i vidxCDEF = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask), 2);
+    const __m128i venCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 19);
+    const __m128i vidxGHIJ = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnGHIJ), vindex_mask), 2);
+    const __m128i venGHIJ = _mm_slli_epi32(_mm_castps_si128(vnGHIJ), 19);
+    const __m128i vidxKLMN = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnKLMN), vindex_mask), 2);
+    const __m128i venKLMN = _mm_slli_epi32(_mm_castps_si128(vnKLMN), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx0123, vidx0123));
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx4567, vidx4567));
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
+      const uint64_t vidxAB = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx89AB, vidx89AB));
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB)));
+      const __m128i vl9 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32))));
+      const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
+      const __m128i vlB = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32))));
+      const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
+      const uint64_t vidxEF = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidxCDEF, vidxCDEF));
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF)));
+      const __m128i vlD = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32))));
+      const __m128i vlCD = _mm_unpacklo_epi32(vlC, vlD);
+      const __m128i vlF = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32))));
+      const __m128i vlEF = _mm_unpacklo_epi32(vlE, vlF);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+      const uint64_t vidxGH = (uint64_t) _mm_cvtsi128_si64(vidxGHIJ);
+      const uint64_t vidxIJ = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidxGHIJ, vidxGHIJ));
+      const __m128i vlG   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH)));
+      const __m128i vlI = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ)));
+      const __m128i vlH = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32))));
+      const __m128i vlGH = _mm_unpacklo_epi32(vlG, vlH);
+      const __m128i vlJ = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32))));
+      const __m128i vlIJ = _mm_unpacklo_epi32(vlI, vlJ);
+      const __m128i vlGHIJ = _mm_unpacklo_epi64(vlGH, vlIJ);
+      const uint64_t vidxKL = (uint64_t) _mm_cvtsi128_si64(vidxKLMN);
+      const uint64_t vidxMN = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidxKLMN, vidxKLMN));
+      const __m128i vlK   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxKL)));
+      const __m128i vlM = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxMN)));
+      const __m128i vlL = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxKL >> 32))));
+      const __m128i vlKL = _mm_unpacklo_epi32(vlK, vlL);
+      const __m128i vlN = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxMN >> 32))));
+      const __m128i vlMN = _mm_unpacklo_epi32(vlM, vlN);
+      const __m128i vlKLMN = _mm_unpacklo_epi64(vlKL, vlMN);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
+      const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
+      const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
+      const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx8)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxA)));
+      const __m128i vl9 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx9)));
+      const __m128i vl89 = _mm_unpacklo_epi32(vl8, vl9);
+      const __m128i vlB = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxB)));
+      const __m128i vlAB = _mm_unpacklo_epi32(vlA, vlB);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
+      const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
+      const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
+      const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxC)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxE)));
+      const __m128i vlD = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxD)));
+      const __m128i vlCD = _mm_unpacklo_epi32(vlC, vlD);
+      const __m128i vlF = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxF)));
+      const __m128i vlEF = _mm_unpacklo_epi32(vlE, vlF);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+      const uint32_t vidxG = (uint32_t) _mm_cvtsi128_si32(vidxGHIJ);
+      const uint32_t vidxH = (uint32_t) _mm_extract_epi16(vidxGHIJ, 2);
+      const uint32_t vidxI = (uint32_t) _mm_extract_epi16(vidxGHIJ, 4);
+      const uint32_t vidxJ = (uint32_t) _mm_extract_epi16(vidxGHIJ, 6);
+      const __m128i vlG   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxG)));
+      const __m128i vlI = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxI)));
+      const __m128i vlH = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxH)));
+      const __m128i vlGH = _mm_unpacklo_epi32(vlG, vlH);
+      const __m128i vlJ = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxJ)));
+      const __m128i vlIJ = _mm_unpacklo_epi32(vlI, vlJ);
+      const __m128i vlGHIJ = _mm_unpacklo_epi64(vlGH, vlIJ);
+      const uint32_t vidxK = (uint32_t) _mm_cvtsi128_si32(vidxKLMN);
+      const uint32_t vidxL = (uint32_t) _mm_extract_epi16(vidxKLMN, 2);
+      const uint32_t vidxM = (uint32_t) _mm_extract_epi16(vidxKLMN, 4);
+      const uint32_t vidxN = (uint32_t) _mm_extract_epi16(vidxKLMN, 6);
+      const __m128i vlK   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxK)));
+      const __m128i vlM = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxM)));
+      const __m128i vlL = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxL)));
+      const __m128i vlKL = _mm_unpacklo_epi32(vlK, vlL);
+      const __m128i vlN = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxN)));
+      const __m128i vlMN = _mm_unpacklo_epi32(vlM, vlN);
+      const __m128i vlKLMN = _mm_unpacklo_epi64(vlKL, vlMN);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ven89AB));
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, venCDEF));
+    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
+    __m128 vsGHIJ = _mm_castsi128_ps(_mm_add_epi32(vlGHIJ, venGHIJ));
+    vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
+    __m128 vsKLMN = _mm_castsi128_ps(_mm_add_epi32(vlKLMN, venKLMN));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc3, vt89AB), vc2);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc3, vtCDEF), vc2);
+    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc3, vtGHIJ), vc2);
+    __m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc3, vtKLMN), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
+    vpKLMN = _mm_mul_ps(vpKLMN, vtKLMN);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
+    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
+    vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
+    vsKLMN = _mm_sub_ps(vsKLMN, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vtKLMN);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
+    const __m128 veKLMN = _mm_mul_ps(_mm_add_ps(vpKLMN, vsKLMN), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+    const __m128 vmGHIJ = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxGHIJ)));
+    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
+    const __m128 vmKLMN = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxKLMN)));
+    vxKLMN = _mm_mul_ps(vxKLMN, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+    const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
+    const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
+    const __m128 vyGHIJ = _mm_or_ps(_mm_and_ps(veGHIJ, vmGHIJ), _mm_andnot_ps(vmGHIJ, vxGHIJ));
+    const __m128 vyKLMN = _mm_or_ps(_mm_and_ps(veKLMN, vmKLMN), _mm_andnot_ps(vmKLMN, vxKLMN));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    _mm_storeu_ps(y + 16, vyGHIJ);
+    _mm_storeu_ps(y + 20, vyKLMN);
+    y += 24;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
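
Note on the SSE2 rr2-lut16-p3 kernels above: every unroll width repeats the same lane-wise recipe — saturate the prescaled input, reconstruct 2^n from a 16-entry 2^(-k/16) table plus shifted exponent bits, refine with a degree-3 polynomial, and blend alpha*(exp(z)-1) against beta*x on the sign of x. Below is a minimal scalar sketch of that recipe for one lane; it is an editorial illustration, not part of the patch. The fp32_to_bits()/fp32_from_bits() helpers are hypothetical, and the table is computed locally with exp2f() instead of referencing the shared xnn_table_exp2minus_k_over_16 constants.

#include <math.h>
#include <stdint.h>
#include <string.h>

static uint32_t fp32_to_bits(float f) { uint32_t b; memcpy(&b, &f, sizeof b); return b; }
static float fp32_from_bits(uint32_t b) { float f; memcpy(&f, &b, sizeof f); return f; }

float elu_rr2_lut16_p3(float x, float prescale, float alpha, float beta) {
  float table[16];  // 2^(-k/16), k = 0..15 (stand-in for the shared constant table)
  for (int k = 0; k < 16; k++) table[k] = exp2f(-(float) k / 16.0f);

  const float z = fmaxf(-0x1.154246p+4f, x * prescale);  // saturate the exp() argument
  float n = z * 0x1.715476p+0f + 0x1.800000p19f;         // round z*log2(e) to multiples of 1/16
  const uint32_t en = fp32_to_bits(n) << 19;             // integer part -> exponent bits
  const uint32_t idx = fp32_to_bits(n) & 0xF;            // low 4 bits -> table index (byte offset <<2 in the SIMD code)
  n -= 0x1.800000p19f;
  float s = fp32_from_bits(fp32_to_bits(table[idx]) + en);  // s ~= 2^n
  float t = n * -0x1.62E400p-1f + z;                     // t = z - n*ln(2), hi/lo split
  t = n * -0x1.7F7D1Cp-20f + t;
  float p = 0x1.55561Cp-3f * t + 0x1.0001ECp-1f;         // degree-3 expm1 polynomial
  p *= t;
  t *= s;
  s -= 1.0f;
  p = p * t + t;
  const float e = (p + s) * alpha;                       // alpha * (exp(z) - 1)
  return signbit(x) ? e : x * beta;                      // negative lane -> ELU tail, else beta*x
}

The lookup table lets the polynomial stay at degree 3 because the reduced argument only spans 1/16 of an octave; the cost is the per-lane gathers that dominate the #if XNN_ARCH_X86_64 / 32-bit extraction blocks above.
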
diff --git a/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x4.c b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x4.c
new file mode 100644
index 0000000..a57f2a8
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x4.c
@@ -0,0 +1,148 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x8.c b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x8.c
new file mode 100644
index 0000000..765ba8f
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-lut16-p3-x8.c
@@ -0,0 +1,248 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    x += 8;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx0123, vidx0123));
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx4567, vidx4567));
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl1 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)));
+      const __m128i vl01 = _mm_unpacklo_epi32(vl0, vl1);
+      const __m128i vl3 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)));
+      const __m128i vl23 = _mm_unpacklo_epi32(vl2, vl3);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl5 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)));
+      const __m128i vl45 = _mm_unpacklo_epi32(vl4, vl5);
+      const __m128i vl7 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)));
+      const __m128i vl67 = _mm_unpacklo_epi32(vl6, vl7);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+      const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+      const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-p6-x12.c b/src/f32-velu/gen/velu-sse2-rr2-p6-x12.c
new file mode 100644
index 0000000..a456a2a
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-p6-x12.c
@@ -0,0 +1,193 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse2_rr2_p6_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    x += 12;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+    const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    y += 12;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
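
Note on the SSE2 rr2-p6 kernels (the x12 variant above and the wider unrolls that follow): they drop the lookup table entirely — the magic bias rounds n to an integer, 2^n comes straight from shifting the biased bits into the exponent field, and a degree-6 polynomial covers the wider reduced range. A minimal scalar sketch under the same assumptions as the earlier one (hypothetical bit-cast helpers, illustration only, not part of the patch):

#include <math.h>
#include <stdint.h>
#include <string.h>

static uint32_t fp32_to_bits(float f) { uint32_t b; memcpy(&b, &f, sizeof b); return b; }
static float fp32_from_bits(uint32_t b) { float f; memcpy(&f, &b, sizeof f); return f; }

float elu_rr2_p6(float x, float prescale, float alpha, float beta) {
  const float z = fmaxf(-0x1.154246p+4f, x * prescale);  // saturate the exp() argument
  float n = z * 0x1.715476p+0f + 0x1.8000FEp23f;         // round z*log2(e) to an integer
  float s = fp32_from_bits(fp32_to_bits(n) << 23);       // s = 2^n, no table needed
  n -= 0x1.8000FEp23f;
  float t = n * -0x1.62E440p-1f + z;                     // t = z - n*ln(2), different hi/lo split
  t = n * 0x1.0105C6p-21f + t;
  float p = 0x1.6b7338p-10f * t + 0x1.12278Ep-7f;        // degree-6 expm1 polynomial
  p = p * t + 0x1.555716p-5f;
  p = p * t + 0x1.5554B0p-3f;
  p = p * t + 0x1.FFFFFEp-2f;
  p *= t;
  t *= s;
  s -= 1.0f;
  p = p * t + t;
  const float e = (p + s) * alpha;                       // alpha * (exp(z) - 1)
  return signbit(x) ? e : x * beta;
}

Trading the 16-entry table for two extra polynomial terms removes the scalar gathers, which is why the p6 bodies above contain no architecture-specific index-extraction blocks.
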
diff --git a/src/f32-velu/gen/velu-sse2-rr2-p6-x16.c b/src/f32-velu/gen/velu-sse2-rr2-p6-x16.c
new file mode 100644
index 0000000..b1997c9
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-p6-x16.c
@@ -0,0 +1,213 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse2_rr2_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    x += 16;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+    const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
+    const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    y += 16;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-p6-x20.c b/src/f32-velu/gen/velu-sse2-rr2-p6-x20.c
new file mode 100644
index 0000000..db77625
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-p6-x20.c
@@ -0,0 +1,233 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse2_rr2_p6_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
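+  // For negative inputs the kernel computes alpha*(exp(z) - 1) with z = prescale*x
+  // clamped at sat_cutoff (~ -17.33), below which the result has effectively
+  // saturated to -alpha. exp(z) is reconstructed as 2**n * exp(t): n = round(z*log2(e))
+  // via the magic-bias trick, 2**n by shifting n into the float exponent field, and
+  // exp(t) - 1 on the residual t = z - n*ln(2) (hi/lo split) via a degree-6 polynomial.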
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
+    x += 20;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
+    __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
+    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
+    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
+
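+    // Per-lane select: where x is negative (sign bit set) take the ELU branch,
+    // otherwise beta*x; the and/andnot/or sequence emulates a blend on plain SSE2.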
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+    const __m128 vmGHIJ = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxGHIJ)));
+    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+    const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
+    const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
+    const __m128 vyGHIJ = _mm_or_ps(_mm_and_ps(veGHIJ, vmGHIJ), _mm_andnot_ps(vmGHIJ, vxGHIJ));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    _mm_storeu_ps(y + 16, vyGHIJ);
+    y += 20;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-p6-x24.c b/src/f32-velu/gen/velu-sse2-rr2-p6-x24.c
new file mode 100644
index 0000000..0436c39
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-p6-x24.c
@@ -0,0 +1,253 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse2_rr2_p6_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
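+  // prescale, alpha and beta are expected to be stored pre-broadcast as 4 identical
+  // floats in the params struct, so a plain aligned load yields the vector constants.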
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
+    __m128 vxKLMN = _mm_loadu_ps(x + 20);
+    x += 24;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
+    const __m128 vzKLMN = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxKLMN, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
+    __m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
+    __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
+    __m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
+    vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);
+
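+    // Degree-6 polynomial in Horner form: p = ((((c6*t + c5)*t + c4)*t + c3)*t + c2)*t.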
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
+    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
+    __m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc6, vtKLMN), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
+    vpKLMN = _mm_mul_ps(vpKLMN, vtKLMN);
+
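+    // Reassemble expm1: exp(z) - 1 = (s - 1) + s*t*(1 + p); alpha is applied below.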
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
+    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
+    vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
+    vsKLMN = _mm_sub_ps(vsKLMN, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vtKLMN);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
+    const __m128 veKLMN = _mm_mul_ps(_mm_add_ps(vpKLMN, vsKLMN), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+    const __m128 vmGHIJ = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxGHIJ)));
+    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
+    const __m128 vmKLMN = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxKLMN)));
+    vxKLMN = _mm_mul_ps(vxKLMN, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+    const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
+    const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
+    const __m128 vyGHIJ = _mm_or_ps(_mm_and_ps(veGHIJ, vmGHIJ), _mm_andnot_ps(vmGHIJ, vxGHIJ));
+    const __m128 vyKLMN = _mm_or_ps(_mm_and_ps(veKLMN, vmKLMN), _mm_andnot_ps(vmKLMN, vxKLMN));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    _mm_storeu_ps(y + 16, vyGHIJ);
+    _mm_storeu_ps(y + 20, vyKLMN);
+    y += 24;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-p6-x4.c b/src/f32-velu/gen/velu-sse2-rr2-p6-x4.c
new file mode 100644
index 0000000..fceaa23
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-p6-x4.c
@@ -0,0 +1,112 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse2_rr2_p6_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
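+    // Store the 1-3 remaining floats: the low pair via storel_pi, then shift the
+    // high half down and store a single float if one element is left.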
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse2-rr2-p6-x8.c b/src/f32-velu/gen/velu-sse2-rr2-p6-x8.c
new file mode 100644
index 0000000..2aea300
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse2-rr2-p6-x8.c
@@ -0,0 +1,173 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse2_rr2_p6_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    x += 8;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+
+    const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+
+    const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
+    const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x12.c b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x12.c
new file mode 100644
index 0000000..1ca7833
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x12.c
@@ -0,0 +1,262 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
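+  // lut16-p3 variant: the low 4 bits of n index the 16-entry table of 2**(-k/16),
+  // the remaining bits of n are shifted into the float exponent field (n << 19),
+  // and the residual t then only needs a degree-3 polynomial.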
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    x += 12;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+    const __m128i vidx89AB = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask), 2);
+    const __m128i ven89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 19);
+
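+    // Gather four table entries per vector: on x86-64 two 64-bit extracts yield the
+    // byte offsets; on 32-bit x86 they are pulled out with 16-bit extracts, since the
+    // offsets are at most 60 bytes.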
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_extract_epi64(vidx0123, 1);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_extract_epi64(vidx4567, 1);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
+      const uint64_t vidxAB = (uint64_t) _mm_extract_epi64(vidx89AB, 1);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB)));
+      const __m128i vl89 = _mm_insert_epi32(vl8, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32))), 1);
+      const __m128i vlAB = _mm_insert_epi32(vlA, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32))), 1);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
+      const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
+      const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
+      const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx8)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxA)));
+      const __m128i vl89 = _mm_insert_epi32(vl8, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx9)), 1);
+      const __m128i vlAB = _mm_insert_epi32(vlA, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxB)), 1);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ven89AB));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc3, vt89AB), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+
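+    // SSE4.1 blendv selects the ELU branch where the sign bit of beta-scaled x is
+    // set (x < 0) and beta*x otherwise.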
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    y += 12;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x16.c b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x16.c
new file mode 100644
index 0000000..a45009e
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x16.c
@@ -0,0 +1,296 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    x += 16;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+    const __m128i vidx89AB = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask), 2);
+    const __m128i ven89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 19);
+    const __m128i vidxCDEF = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask), 2);
+    const __m128i venCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_extract_epi64(vidx0123, 1);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_extract_epi64(vidx4567, 1);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
+      const uint64_t vidxAB = (uint64_t) _mm_extract_epi64(vidx89AB, 1);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB)));
+      const __m128i vl89 = _mm_insert_epi32(vl8, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32))), 1);
+      const __m128i vlAB = _mm_insert_epi32(vlA, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32))), 1);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
+      const uint64_t vidxEF = (uint64_t) _mm_extract_epi64(vidxCDEF, 1);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF)));
+      const __m128i vlCD = _mm_insert_epi32(vlC, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32))), 1);
+      const __m128i vlEF = _mm_insert_epi32(vlE, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32))), 1);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
+      const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
+      const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
+      const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx8)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxA)));
+      const __m128i vl89 = _mm_insert_epi32(vl8, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx9)), 1);
+      const __m128i vlAB = _mm_insert_epi32(vlA, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxB)), 1);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
+      const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
+      const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
+      const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxC)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxE)));
+      const __m128i vlCD = _mm_insert_epi32(vlC, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxD)), 1);
+      const __m128i vlEF = _mm_insert_epi32(vlE, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxF)), 1);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ven89AB));
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, venCDEF));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc3, vt89AB), vc2);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc3, vtCDEF), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
+    const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    y += 16;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x20.c b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x20.c
new file mode 100644
index 0000000..9ff7ef9
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x20.c
@@ -0,0 +1,330 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
+    x += 20;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+    const __m128i vidx89AB = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask), 2);
+    const __m128i ven89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 19);
+    const __m128i vidxCDEF = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask), 2);
+    const __m128i venCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 19);
+    const __m128i vidxGHIJ = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnGHIJ), vindex_mask), 2);
+    const __m128i venGHIJ = _mm_slli_epi32(_mm_castps_si128(vnGHIJ), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_extract_epi64(vidx0123, 1);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_extract_epi64(vidx4567, 1);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
+      const uint64_t vidxAB = (uint64_t) _mm_extract_epi64(vidx89AB, 1);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB)));
+      const __m128i vl89 = _mm_insert_epi32(vl8, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32))), 1);
+      const __m128i vlAB = _mm_insert_epi32(vlA, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32))), 1);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
+      const uint64_t vidxEF = (uint64_t) _mm_extract_epi64(vidxCDEF, 1);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF)));
+      const __m128i vlCD = _mm_insert_epi32(vlC, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32))), 1);
+      const __m128i vlEF = _mm_insert_epi32(vlE, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32))), 1);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+      const uint64_t vidxGH = (uint64_t) _mm_cvtsi128_si64(vidxGHIJ);
+      const uint64_t vidxIJ = (uint64_t) _mm_extract_epi64(vidxGHIJ, 1);
+      const __m128i vlG   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH)));
+      const __m128i vlI = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ)));
+      const __m128i vlGH = _mm_insert_epi32(vlG, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32))), 1);
+      const __m128i vlIJ = _mm_insert_epi32(vlI, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32))), 1);
+      const __m128i vlGHIJ = _mm_unpacklo_epi64(vlGH, vlIJ);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
+      const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
+      const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
+      const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx8)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxA)));
+      const __m128i vl89 = _mm_insert_epi32(vl8, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx9)), 1);
+      const __m128i vlAB = _mm_insert_epi32(vlA, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxB)), 1);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
+      const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
+      const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
+      const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxC)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxE)));
+      const __m128i vlCD = _mm_insert_epi32(vlC, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxD)), 1);
+      const __m128i vlEF = _mm_insert_epi32(vlE, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxF)), 1);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+      const uint32_t vidxG = (uint32_t) _mm_cvtsi128_si32(vidxGHIJ);
+      const uint32_t vidxH = (uint32_t) _mm_extract_epi16(vidxGHIJ, 2);
+      const uint32_t vidxI = (uint32_t) _mm_extract_epi16(vidxGHIJ, 4);
+      const uint32_t vidxJ = (uint32_t) _mm_extract_epi16(vidxGHIJ, 6);
+      const __m128i vlG   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxG)));
+      const __m128i vlI = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxI)));
+      const __m128i vlGH = _mm_insert_epi32(vlG, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxH)), 1);
+      const __m128i vlIJ = _mm_insert_epi32(vlI, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxJ)), 1);
+      const __m128i vlGHIJ = _mm_unpacklo_epi64(vlGH, vlIJ);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ven89AB));
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, venCDEF));
+    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
+    __m128 vsGHIJ = _mm_castsi128_ps(_mm_add_epi32(vlGHIJ, venGHIJ));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc3, vt89AB), vc2);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc3, vtCDEF), vc2);
+    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc3, vtGHIJ), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
+    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
+    const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
+    const __m128 vyGHIJ = _mm_blendv_ps(vxGHIJ, veGHIJ, vxGHIJ);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    _mm_storeu_ps(y + 16, vyGHIJ);
+    y += 20;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x24.c b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x24.c
new file mode 100644
index 0000000..518c397
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x24.c
@@ -0,0 +1,364 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
+    __m128 vxKLMN = _mm_loadu_ps(x + 20);
+    x += 24;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
+    const __m128 vzKLMN = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxKLMN, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
+    __m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+    const __m128i vidx89AB = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn89AB), vindex_mask), 2);
+    const __m128i ven89AB = _mm_slli_epi32(_mm_castps_si128(vn89AB), 19);
+    const __m128i vidxCDEF = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnCDEF), vindex_mask), 2);
+    const __m128i venCDEF = _mm_slli_epi32(_mm_castps_si128(vnCDEF), 19);
+    const __m128i vidxGHIJ = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnGHIJ), vindex_mask), 2);
+    const __m128i venGHIJ = _mm_slli_epi32(_mm_castps_si128(vnGHIJ), 19);
+    const __m128i vidxKLMN = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vnKLMN), vindex_mask), 2);
+    const __m128i venKLMN = _mm_slli_epi32(_mm_castps_si128(vnKLMN), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_extract_epi64(vidx0123, 1);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_extract_epi64(vidx4567, 1);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint64_t vidx89 = (uint64_t) _mm_cvtsi128_si64(vidx89AB);
+      const uint64_t vidxAB = (uint64_t) _mm_extract_epi64(vidx89AB, 1);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB)));
+      const __m128i vl89 = _mm_insert_epi32(vl8, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32))), 1);
+      const __m128i vlAB = _mm_insert_epi32(vlA, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32))), 1);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint64_t vidxCD = (uint64_t) _mm_cvtsi128_si64(vidxCDEF);
+      const uint64_t vidxEF = (uint64_t) _mm_extract_epi64(vidxCDEF, 1);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF)));
+      const __m128i vlCD = _mm_insert_epi32(vlC, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32))), 1);
+      const __m128i vlEF = _mm_insert_epi32(vlE, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32))), 1);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+      const uint64_t vidxGH = (uint64_t) _mm_cvtsi128_si64(vidxGHIJ);
+      const uint64_t vidxIJ = (uint64_t) _mm_extract_epi64(vidxGHIJ, 1);
+      const __m128i vlG   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH)));
+      const __m128i vlI = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ)));
+      const __m128i vlGH = _mm_insert_epi32(vlG, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32))), 1);
+      const __m128i vlIJ = _mm_insert_epi32(vlI, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32))), 1);
+      const __m128i vlGHIJ = _mm_unpacklo_epi64(vlGH, vlIJ);
+      const uint64_t vidxKL = (uint64_t) _mm_cvtsi128_si64(vidxKLMN);
+      const uint64_t vidxMN = (uint64_t) _mm_extract_epi64(vidxKLMN, 1);
+      const __m128i vlK   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxKL)));
+      const __m128i vlM = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxMN)));
+      const __m128i vlKL = _mm_insert_epi32(vlK, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxKL >> 32))), 1);
+      const __m128i vlMN = _mm_insert_epi32(vlM, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxMN >> 32))), 1);
+      const __m128i vlKLMN = _mm_unpacklo_epi64(vlKL, vlMN);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+      const uint32_t vidx8 = (uint32_t) _mm_cvtsi128_si32(vidx89AB);
+      const uint32_t vidx9 = (uint32_t) _mm_extract_epi16(vidx89AB, 2);
+      const uint32_t vidxA = (uint32_t) _mm_extract_epi16(vidx89AB, 4);
+      const uint32_t vidxB = (uint32_t) _mm_extract_epi16(vidx89AB, 6);
+      const __m128i vl8   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx8)));
+      const __m128i vlA = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxA)));
+      const __m128i vl89 = _mm_insert_epi32(vl8, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx9)), 1);
+      const __m128i vlAB = _mm_insert_epi32(vlA, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxB)), 1);
+      const __m128i vl89AB = _mm_unpacklo_epi64(vl89, vlAB);
+      const uint32_t vidxC = (uint32_t) _mm_cvtsi128_si32(vidxCDEF);
+      const uint32_t vidxD = (uint32_t) _mm_extract_epi16(vidxCDEF, 2);
+      const uint32_t vidxE = (uint32_t) _mm_extract_epi16(vidxCDEF, 4);
+      const uint32_t vidxF = (uint32_t) _mm_extract_epi16(vidxCDEF, 6);
+      const __m128i vlC   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxC)));
+      const __m128i vlE = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxE)));
+      const __m128i vlCD = _mm_insert_epi32(vlC, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxD)), 1);
+      const __m128i vlEF = _mm_insert_epi32(vlE, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxF)), 1);
+      const __m128i vlCDEF = _mm_unpacklo_epi64(vlCD, vlEF);
+      const uint32_t vidxG = (uint32_t) _mm_cvtsi128_si32(vidxGHIJ);
+      const uint32_t vidxH = (uint32_t) _mm_extract_epi16(vidxGHIJ, 2);
+      const uint32_t vidxI = (uint32_t) _mm_extract_epi16(vidxGHIJ, 4);
+      const uint32_t vidxJ = (uint32_t) _mm_extract_epi16(vidxGHIJ, 6);
+      const __m128i vlG   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxG)));
+      const __m128i vlI = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxI)));
+      const __m128i vlGH = _mm_insert_epi32(vlG, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxH)), 1);
+      const __m128i vlIJ = _mm_insert_epi32(vlI, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxJ)), 1);
+      const __m128i vlGHIJ = _mm_unpacklo_epi64(vlGH, vlIJ);
+      const uint32_t vidxK = (uint32_t) _mm_cvtsi128_si32(vidxKLMN);
+      const uint32_t vidxL = (uint32_t) _mm_extract_epi16(vidxKLMN, 2);
+      const uint32_t vidxM = (uint32_t) _mm_extract_epi16(vidxKLMN, 4);
+      const uint32_t vidxN = (uint32_t) _mm_extract_epi16(vidxKLMN, 6);
+      const __m128i vlK   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxK)));
+      const __m128i vlM = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxM)));
+      const __m128i vlKL = _mm_insert_epi32(vlK, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxL)), 1);
+      const __m128i vlMN = _mm_insert_epi32(vlM, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidxN)), 1);
+      const __m128i vlKLMN = _mm_unpacklo_epi64(vlKL, vlMN);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    __m128 vs89AB = _mm_castsi128_ps(_mm_add_epi32(vl89AB, ven89AB));
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_add_epi32(vlCDEF, venCDEF));
+    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
+    __m128 vsGHIJ = _mm_castsi128_ps(_mm_add_epi32(vlGHIJ, venGHIJ));
+    vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
+    __m128 vsKLMN = _mm_castsi128_ps(_mm_add_epi32(vlKLMN, venKLMN));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc3, vt89AB), vc2);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc3, vtCDEF), vc2);
+    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc3, vtGHIJ), vc2);
+    __m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc3, vtKLMN), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
+    vpKLMN = _mm_mul_ps(vpKLMN, vtKLMN);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
+    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
+    vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
+    vsKLMN = _mm_sub_ps(vsKLMN, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vtKLMN);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
+    const __m128 veKLMN = _mm_mul_ps(_mm_add_ps(vpKLMN, vsKLMN), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
+    vxKLMN = _mm_mul_ps(vxKLMN, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
+    const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
+    const __m128 vyGHIJ = _mm_blendv_ps(vxGHIJ, veGHIJ, vxGHIJ);
+    const __m128 vyKLMN = _mm_blendv_ps(vxKLMN, veKLMN, vxKLMN);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    _mm_storeu_ps(y + 16, vyGHIJ);
+    _mm_storeu_ps(y + 20, vyKLMN);
+    y += 24;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c
new file mode 100644
index 0000000..6234e17
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c
@@ -0,0 +1,138 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x8.c b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x8.c
new file mode 100644
index 0000000..7c4aa67
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x8.c
@@ -0,0 +1,228 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    x += 8;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+
+    const __m128i vidx0123 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn0123), vindex_mask), 2);
+    const __m128i ven0123 = _mm_slli_epi32(_mm_castps_si128(vn0123), 19);
+    const __m128i vidx4567 = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn4567), vindex_mask), 2);
+    const __m128i ven4567 = _mm_slli_epi32(_mm_castps_si128(vn4567), 19);
+
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx01 = (uint64_t) _mm_cvtsi128_si64(vidx0123);
+      const uint64_t vidx23 = (uint64_t) _mm_extract_epi64(vidx0123, 1);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32))), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32))), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint64_t vidx45 = (uint64_t) _mm_cvtsi128_si64(vidx4567);
+      const uint64_t vidx67 = (uint64_t) _mm_extract_epi64(vidx4567, 1);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32))), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32))), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+    #else  // !XNN_ARCH_X86_64
+      const uint32_t vidx0 = (uint32_t) _mm_cvtsi128_si32(vidx0123);
+      const uint32_t vidx1 = (uint32_t) _mm_extract_epi16(vidx0123, 2);
+      const uint32_t vidx2 = (uint32_t) _mm_extract_epi16(vidx0123, 4);
+      const uint32_t vidx3 = (uint32_t) _mm_extract_epi16(vidx0123, 6);
+      const __m128i vl0   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx0)));
+      const __m128i vl2 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx2)));
+      const __m128i vl01 = _mm_insert_epi32(vl0, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx1)), 1);
+      const __m128i vl23 = _mm_insert_epi32(vl2, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx3)), 1);
+      const __m128i vl0123 = _mm_unpacklo_epi64(vl01, vl23);
+      const uint32_t vidx4 = (uint32_t) _mm_cvtsi128_si32(vidx4567);
+      const uint32_t vidx5 = (uint32_t) _mm_extract_epi16(vidx4567, 2);
+      const uint32_t vidx6 = (uint32_t) _mm_extract_epi16(vidx4567, 4);
+      const uint32_t vidx7 = (uint32_t) _mm_extract_epi16(vidx4567, 6);
+      const __m128i vl4   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx4)));
+      const __m128i vl6 = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx6)));
+      const __m128i vl45 = _mm_insert_epi32(vl4, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx5)), 1);
+      const __m128i vl67 = _mm_insert_epi32(vl6, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx7)), 1);
+      const __m128i vl4567 = _mm_unpacklo_epi64(vl45, vl67);
+    #endif  // XNN_ARCH_X86_64
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    __m128 vs0123 = _mm_castsi128_ps(_mm_add_epi32(vl0123, ven0123));
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    __m128 vs4567 = _mm_castsi128_ps(_mm_add_epi32(vl4567, ven4567));
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc3, vt0123), vc2);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc3, vt4567), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-p6-x12.c b/src/f32-velu/gen/velu-sse41-rr2-p6-x12.c
new file mode 100644
index 0000000..be76447
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-p6-x12.c
@@ -0,0 +1,188 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse41_rr2_p6_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    x += 12;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    y += 12;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-p6-x16.c b/src/f32-velu/gen/velu-sse41-rr2-p6-x16.c
new file mode 100644
index 0000000..bb64be9
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-p6-x16.c
@@ -0,0 +1,207 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse41_rr2_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    x += 16;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
+    const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    y += 16;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-p6-x20.c b/src/f32-velu/gen/velu-sse41-rr2-p6-x20.c
new file mode 100644
index 0000000..9d60679
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-p6-x20.c
@@ -0,0 +1,226 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse41_rr2_p6_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
+    x += 20;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
+    __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
+    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
+    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
+    const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
+    const __m128 vyGHIJ = _mm_blendv_ps(vxGHIJ, veGHIJ, vxGHIJ);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    _mm_storeu_ps(y + 16, vyGHIJ);
+    y += 20;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-p6-x24.c b/src/f32-velu/gen/velu-sse41-rr2-p6-x24.c
new file mode 100644
index 0000000..365e795
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-p6-x24.c
@@ -0,0 +1,245 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse41_rr2_p6_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    __m128 vx89AB = _mm_loadu_ps(x + 8);
+    __m128 vxCDEF = _mm_loadu_ps(x + 12);
+    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
+    __m128 vxKLMN = _mm_loadu_ps(x + 20);
+    x += 24;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
+    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
+    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
+    const __m128 vzKLMN = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxKLMN, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
+    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
+    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
+    __m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
+    __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
+    __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
+    __m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
+    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
+    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
+    vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
+    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
+    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
+    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
+    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
+    __m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc6, vtKLMN), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
+    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
+    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
+    vpKLMN = _mm_mul_ps(vpKLMN, vtKLMN);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
+    vs89AB = _mm_sub_ps(vs89AB, vone);
+    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
+    vsCDEF = _mm_sub_ps(vsCDEF, vone);
+    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
+    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
+    vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
+    vsKLMN = _mm_sub_ps(vsKLMN, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
+    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
+    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vtKLMN);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
+    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
+    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
+    const __m128 veKLMN = _mm_mul_ps(_mm_add_ps(vpKLMN, vsKLMN), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+    vx89AB = _mm_mul_ps(vx89AB, vbeta);
+    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
+    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
+    vxKLMN = _mm_mul_ps(vxKLMN, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
+    const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
+    const __m128 vyGHIJ = _mm_blendv_ps(vxGHIJ, veGHIJ, vxGHIJ);
+    const __m128 vyKLMN = _mm_blendv_ps(vxKLMN, veKLMN, vxKLMN);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    _mm_storeu_ps(y + 8, vy89AB);
+    _mm_storeu_ps(y + 12, vyCDEF);
+    _mm_storeu_ps(y + 16, vyGHIJ);
+    _mm_storeu_ps(y + 20, vyKLMN);
+    y += 24;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-p6-x4.c b/src/f32-velu/gen/velu-sse41-rr2-p6-x4.c
new file mode 100644
index 0000000..68c92e3
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-p6-x4.c
@@ -0,0 +1,110 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse41_rr2_p6_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-sse41-rr2-p6-x8.c b/src/f32-velu/gen/velu-sse41-rr2-p6-x8.c
new file mode 100644
index 0000000..71ac764
--- /dev/null
+++ b/src/f32-velu/gen/velu-sse41-rr2-p6-x8.c
@@ -0,0 +1,169 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/sse-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__sse41_rr2_p6_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    __m128 vx0123 = _mm_loadu_ps(x);
+    __m128 vx4567 = _mm_loadu_ps(x + 4);
+    x += 8;
+
+    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
+    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
+
+    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
+    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
+
+    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
+    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
+
+    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
+    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
+
+    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
+    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
+
+    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
+
+    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
+    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
+
+    vp0123 = _mm_mul_ps(vp0123, vt0123);
+    vp4567 = _mm_mul_ps(vp4567, vt4567);
+
+    vt0123 = _mm_mul_ps(vt0123, vs0123);
+    vs0123 = _mm_sub_ps(vs0123, vone);
+    vt4567 = _mm_mul_ps(vt4567, vs4567);
+    vs4567 = _mm_sub_ps(vs4567, vone);
+
+    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
+    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
+
+    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
+    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
+
+    vx0123 = _mm_mul_ps(vx0123, vbeta);
+    vx4567 = _mm_mul_ps(vx4567, vbeta);
+
+    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
+    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
+
+    _mm_storeu_ps(y, vy0123);
+    _mm_storeu_ps(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    vx = _mm_mul_ps(vx, vbeta);
+    __m128 vy = _mm_blendv_ps(vx, ve, vx);
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x1.c b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x1.c
new file mode 100644
index 0000000..027a917
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x1.c
@@ -0,0 +1,73 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  do {
+    float vx = *x++;
+
+    const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+    float vn = vz * vlog2e + vmagic_bias;
+    const uint32_t ven = fp32_to_bits(vn) << 19;
+    const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+    vn -= vmagic_bias;
+
+    float vt = vn * vminus_ln2_hi + vz;
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+    vt = vn * vminus_ln2_lo + vt;
+
+    float vp = vc3 * vt + vc2;
+    vp *= vt;
+
+    vt *= vs;
+    vs -= vone;
+    vp = vp * vt + vt;
+    const float ve = (vp + vs) * valpha;
+
+    float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+    vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+    *y++ = vy;
+
+    n -= sizeof(float);
+  } while (n != 0);
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x2.c b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x2.c
new file mode 100644
index 0000000..88c0e64
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x2.c
@@ -0,0 +1,123 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    x += 2;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float vx = *x;
+
+    const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+    float vn = vz * vlog2e + vmagic_bias;
+    const uint32_t ven = fp32_to_bits(vn) << 19;
+    const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+    vn -= vmagic_bias;
+
+    float vt = vn * vminus_ln2_hi + vz;
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+    vt = vn * vminus_ln2_lo + vt;
+
+    float vp = vc3 * vt + vc2;
+    vp *= vt;
+
+    vt *= vs;
+    vs -= vone;
+    vp = vp * vt + vt;
+    const float ve = (vp + vs) * valpha;
+
+    float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+    vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+    *y = vy;
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x3.c b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x3.c
new file mode 100644
index 0000000..b404738
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x3.c
@@ -0,0 +1,145 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 3 * sizeof(float); n -= 3 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    x += 3;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
+    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
+    vn2 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+    float vp2 = vc3 * vt2 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y += 3;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+      vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x4.c b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x4.c
new file mode 100644
index 0000000..c9ff69c
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x4.c
@@ -0,0 +1,163 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    x += 4;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
+    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
+    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
+    vn2 -= vmagic_bias;
+    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
+    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
+    vn3 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+    float vp2 = vc3 * vt2 + vc2;
+    float vp3 = vc3 * vt3 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
+    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+      vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x5.c b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x5.c
new file mode 100644
index 0000000..00001db
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x5.c
@@ -0,0 +1,181 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 5 * sizeof(float); n -= 5 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    float vx4 = x[4];
+    x += 5;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
+    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
+    const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+    float vn4 = vz4 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
+    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
+    vn2 -= vmagic_bias;
+    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
+    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
+    vn3 -= vmagic_bias;
+    const uint32_t ven4 = fp32_to_bits(vn4) << 19;
+    const uint32_t vidx4 = fp32_to_bits(vn4) & vindex_mask;
+    vn4 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
+    float vt4 = vn4 * vminus_ln2_hi + vz4;
+    float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    vt4 = vn4 * vminus_ln2_lo + vt4;
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+    float vp2 = vc3 * vt2 + vc2;
+    float vp3 = vc3 * vt3 + vc2;
+    float vp4 = vc3 * vt4 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+    vp4 *= vt4;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+    vt4 *= vs4;
+    vs4 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+    vp4 = vp4 * vt4 + vt4;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
+    const float ve4 = (vp4 + vs4) * valpha;
+    float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
+    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
+    vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y[4] = vy4;
+    y += 5;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+      vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x6.c b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x6.c
new file mode 100644
index 0000000..d708b57
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x6.c
@@ -0,0 +1,199 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    float vx4 = x[4];
+    float vx5 = x[5];
+    x += 6;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
+    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
+    const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
+    const float vz5 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx5 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+    float vn4 = vz4 * vlog2e + vmagic_bias;
+    float vn5 = vz5 * vlog2e + vmagic_bias;
+
+    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
+    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
+    vn0 -= vmagic_bias;
+    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
+    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
+    vn1 -= vmagic_bias;
+    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
+    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
+    vn2 -= vmagic_bias;
+    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
+    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
+    vn3 -= vmagic_bias;
+    const uint32_t ven4 = fp32_to_bits(vn4) << 19;
+    const uint32_t vidx4 = fp32_to_bits(vn4) & vindex_mask;
+    vn4 -= vmagic_bias;
+    const uint32_t ven5 = fp32_to_bits(vn5) << 19;
+    const uint32_t vidx5 = fp32_to_bits(vn5) & vindex_mask;
+    vn5 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
+    float vt4 = vn4 * vminus_ln2_hi + vz4;
+    float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
+    float vt5 = vn5 * vminus_ln2_hi + vz5;
+    float vs5 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx5] + ven5);
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    vt4 = vn4 * vminus_ln2_lo + vt4;
+    vt5 = vn5 * vminus_ln2_lo + vt5;
+
+    float vp0 = vc3 * vt0 + vc2;
+    float vp1 = vc3 * vt1 + vc2;
+    float vp2 = vc3 * vt2 + vc2;
+    float vp3 = vc3 * vt3 + vc2;
+    float vp4 = vc3 * vt4 + vc2;
+    float vp5 = vc3 * vt5 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+    vp4 *= vt4;
+    vp5 *= vt5;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+    vt4 *= vs4;
+    vs4 -= vone;
+    vt5 *= vs5;
+    vs5 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+    vp4 = vp4 * vt4 + vt4;
+    vp5 = vp5 * vt5 + vt5;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
+    const float ve4 = (vp4 + vs4) * valpha;
+    float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
+    const float ve5 = (vp5 + vs5) * valpha;
+    float vy5 = __builtin_wasm_max_f32(vx5 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
+    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
+    vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
+    vy5 += __builtin_wasm_min_f32(ve5, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y[4] = vy4;
+    y[5] = vy5;
+    y += 6;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+      vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-p6-x1.c b/src/f32-velu/gen/velu-wasm-rr2-p6-x1.c
new file mode 100644
index 0000000..1fcf12a
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-p6-x1.c
@@ -0,0 +1,74 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__wasm_rr2_p6_x1(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  do {
+    float vx = *x++;
+
+    const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+    float vn = vz * vlog2e + vmagic_bias;
+    float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+    vn -= vmagic_bias;
+
+    float vt = vn * vminus_ln2_hi + vz;
+    vt = vn * vminus_ln2_lo + vt;
+
+
+    float vp = vc6 * vt + vc5;
+    vp = vp * vt + vc4;
+    vp = vp * vt + vc3;
+    vp = vp * vt + vc2;
+    vp *= vt;
+
+    vt *= vs;
+    vs -= vone;
+    vp = vp * vt + vt;
+    const float ve = (vp + vs) * valpha;
+
+    float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+    vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+    *y++ = vy;
+
+    n -= sizeof(float);
+  } while (n != 0);
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-p6-x2.c b/src/f32-velu/gen/velu-wasm-rr2-p6-x2.c
new file mode 100644
index 0000000..370e8d7
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-p6-x2.c
@@ -0,0 +1,130 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__wasm_rr2_p6_x2(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    x += 2;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y += 2;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float vx = *x;
+
+    const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+    float vn = vz * vlog2e + vmagic_bias;
+    float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+    vn -= vmagic_bias;
+
+    float vt = vn * vminus_ln2_hi + vz;
+    vt = vn * vminus_ln2_lo + vt;
+
+
+    float vp = vc6 * vt + vc5;
+    vp = vp * vt + vc4;
+    vp = vp * vt + vc3;
+    vp = vp * vt + vc2;
+    vp *= vt;
+
+    vt *= vs;
+    vs -= vone;
+    vp = vp * vt + vt;
+    const float ve = (vp + vs) * valpha;
+
+    float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+    vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+    *y = vy;
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-p6-x3.c b/src/f32-velu/gen/velu-wasm-rr2-p6-x3.c
new file mode 100644
index 0000000..88f5f82
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-p6-x3.c
@@ -0,0 +1,153 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__wasm_rr2_p6_x3(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 3 * sizeof(float); n -= 3 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    x += 3;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
+    vn2 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+    float vp2 = vc6 * vt2 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+    vp2 = vp2 * vt2 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+    vp2 = vp2 * vt2 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+    vp2 = vp2 * vt2 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y += 3;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+      vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-p6-x4.c b/src/f32-velu/gen/velu-wasm-rr2-p6-x4.c
new file mode 100644
index 0000000..3904497
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-p6-x4.c
@@ -0,0 +1,172 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__wasm_rr2_p6_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    x += 4;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
+    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
+    vn2 -= vmagic_bias;
+    float vs3 = fp32_from_bits(fp32_to_bits(vn3) << 23);
+    vn3 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+    float vp2 = vc6 * vt2 + vc5;
+    float vp3 = vc6 * vt3 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+    vp2 = vp2 * vt2 + vc4;
+    vp3 = vp3 * vt3 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+    vp2 = vp2 * vt2 + vc3;
+    vp3 = vp3 * vt3 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+    vp2 = vp2 * vt2 + vc2;
+    vp3 = vp3 * vt3 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
+    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+      vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-p6-x5.c b/src/f32-velu/gen/velu-wasm-rr2-p6-x5.c
new file mode 100644
index 0000000..7191010
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-p6-x5.c
@@ -0,0 +1,191 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__wasm_rr2_p6_x5(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 5 * sizeof(float); n -= 5 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    float vx4 = x[4];
+    x += 5;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
+    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
+    const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+    float vn4 = vz4 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
+    vn2 -= vmagic_bias;
+    float vs3 = fp32_from_bits(fp32_to_bits(vn3) << 23);
+    vn3 -= vmagic_bias;
+    float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);
+    vn4 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vt4 = vn4 * vminus_ln2_hi + vz4;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    vt4 = vn4 * vminus_ln2_lo + vt4;
+
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+    float vp2 = vc6 * vt2 + vc5;
+    float vp3 = vc6 * vt3 + vc5;
+    float vp4 = vc6 * vt4 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+    vp2 = vp2 * vt2 + vc4;
+    vp3 = vp3 * vt3 + vc4;
+    vp4 = vp4 * vt4 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+    vp2 = vp2 * vt2 + vc3;
+    vp3 = vp3 * vt3 + vc3;
+    vp4 = vp4 * vt4 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+    vp2 = vp2 * vt2 + vc2;
+    vp3 = vp3 * vt3 + vc2;
+    vp4 = vp4 * vt4 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+    vp4 *= vt4;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+    vt4 *= vs4;
+    vs4 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+    vp4 = vp4 * vt4 + vt4;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
+    const float ve4 = (vp4 + vs4) * valpha;
+    float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
+    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
+    vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y[4] = vy4;
+    y += 5;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+      vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasm-rr2-p6-x6.c b/src/f32-velu/gen/velu-wasm-rr2-p6-x6.c
new file mode 100644
index 0000000..3ff871e
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasm-rr2-p6-x6.c
@@ -0,0 +1,210 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/scalar-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__wasm_rr2_p6_x6(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
+    float vx0 = x[0];
+    float vx1 = x[1];
+    float vx2 = x[2];
+    float vx3 = x[3];
+    float vx4 = x[4];
+    float vx5 = x[5];
+    x += 6;
+
+    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
+    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
+    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
+    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
+    const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
+    const float vz5 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx5 * vprescale, vsat_cutoff), 0.0f);
+
+    float vn0 = vz0 * vlog2e + vmagic_bias;
+    float vn1 = vz1 * vlog2e + vmagic_bias;
+    float vn2 = vz2 * vlog2e + vmagic_bias;
+    float vn3 = vz3 * vlog2e + vmagic_bias;
+    float vn4 = vz4 * vlog2e + vmagic_bias;
+    float vn5 = vz5 * vlog2e + vmagic_bias;
+
+    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
+    vn0 -= vmagic_bias;
+    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
+    vn1 -= vmagic_bias;
+    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
+    vn2 -= vmagic_bias;
+    float vs3 = fp32_from_bits(fp32_to_bits(vn3) << 23);
+    vn3 -= vmagic_bias;
+    float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);
+    vn4 -= vmagic_bias;
+    float vs5 = fp32_from_bits(fp32_to_bits(vn5) << 23);
+    vn5 -= vmagic_bias;
+
+    float vt0 = vn0 * vminus_ln2_hi + vz0;
+    float vt1 = vn1 * vminus_ln2_hi + vz1;
+    float vt2 = vn2 * vminus_ln2_hi + vz2;
+    float vt3 = vn3 * vminus_ln2_hi + vz3;
+    float vt4 = vn4 * vminus_ln2_hi + vz4;
+    float vt5 = vn5 * vminus_ln2_hi + vz5;
+
+    vt0 = vn0 * vminus_ln2_lo + vt0;
+    vt1 = vn1 * vminus_ln2_lo + vt1;
+    vt2 = vn2 * vminus_ln2_lo + vt2;
+    vt3 = vn3 * vminus_ln2_lo + vt3;
+    vt4 = vn4 * vminus_ln2_lo + vt4;
+    vt5 = vn5 * vminus_ln2_lo + vt5;
+
+
+    float vp0 = vc6 * vt0 + vc5;
+    float vp1 = vc6 * vt1 + vc5;
+    float vp2 = vc6 * vt2 + vc5;
+    float vp3 = vc6 * vt3 + vc5;
+    float vp4 = vc6 * vt4 + vc5;
+    float vp5 = vc6 * vt5 + vc5;
+
+    vp0 = vp0 * vt0 + vc4;
+    vp1 = vp1 * vt1 + vc4;
+    vp2 = vp2 * vt2 + vc4;
+    vp3 = vp3 * vt3 + vc4;
+    vp4 = vp4 * vt4 + vc4;
+    vp5 = vp5 * vt5 + vc4;
+
+    vp0 = vp0 * vt0 + vc3;
+    vp1 = vp1 * vt1 + vc3;
+    vp2 = vp2 * vt2 + vc3;
+    vp3 = vp3 * vt3 + vc3;
+    vp4 = vp4 * vt4 + vc3;
+    vp5 = vp5 * vt5 + vc3;
+
+    vp0 = vp0 * vt0 + vc2;
+    vp1 = vp1 * vt1 + vc2;
+    vp2 = vp2 * vt2 + vc2;
+    vp3 = vp3 * vt3 + vc2;
+    vp4 = vp4 * vt4 + vc2;
+    vp5 = vp5 * vt5 + vc2;
+
+    vp0 *= vt0;
+    vp1 *= vt1;
+    vp2 *= vt2;
+    vp3 *= vt3;
+    vp4 *= vt4;
+    vp5 *= vt5;
+
+    vt0 *= vs0;
+    vs0 -= vone;
+    vt1 *= vs1;
+    vs1 -= vone;
+    vt2 *= vs2;
+    vs2 -= vone;
+    vt3 *= vs3;
+    vs3 -= vone;
+    vt4 *= vs4;
+    vs4 -= vone;
+    vt5 *= vs5;
+    vs5 -= vone;
+
+    vp0 = vp0 * vt0 + vt0;
+    vp1 = vp1 * vt1 + vt1;
+    vp2 = vp2 * vt2 + vt2;
+    vp3 = vp3 * vt3 + vt3;
+    vp4 = vp4 * vt4 + vt4;
+    vp5 = vp5 * vt5 + vt5;
+
+    const float ve0 = (vp0 + vs0) * valpha;
+    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
+    const float ve1 = (vp1 + vs1) * valpha;
+    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
+    const float ve2 = (vp2 + vs2) * valpha;
+    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
+    const float ve3 = (vp3 + vs3) * valpha;
+    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
+    const float ve4 = (vp4 + vs4) * valpha;
+    float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
+    const float ve5 = (vp5 + vs5) * valpha;
+    float vy5 = __builtin_wasm_max_f32(vx5 * vbeta, 0.0f);
+
+    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
+    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
+    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
+    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
+    vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
+    vy5 += __builtin_wasm_min_f32(ve5, 0.0f);
+
+    y[0] = vy0;
+    y[1] = vy1;
+    y[2] = vy2;
+    y[3] = vy3;
+    y[4] = vy4;
+    y[5] = vy5;
+    y += 6;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      float vx = *x++;
+
+      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+      vy += __builtin_wasm_min_f32(ve, 0.0f);
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x12.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x12.c
new file mode 100644
index 0000000..71287c8
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x12.c
@@ -0,0 +1,225 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    x += 12;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+    const v128_t vz89AB = wasm_f32x4_max(wasm_f32x4_mul(vx89AB, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+    const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
+    const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+    const uint64_t vidx89 = wasm_i64x2_extract_lane(vidx89AB, 0);
+    const uint64_t vidxAB = wasm_i64x2_extract_lane(vidx89AB, 1);
+    const float vl8   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    const float vl9 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)));
+    const float vlA = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    const float vlB = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)));
+    const v128_t vl89AB = wasm_f32x4_make(vl8, vl9, vlA, vlB);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt89AB), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    y += 12;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x16.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x16.c
new file mode 100644
index 0000000..c0a47b3
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x16.c
@@ -0,0 +1,251 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    x += 16;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+    const v128_t vz89AB = wasm_f32x4_max(wasm_f32x4_mul(vx89AB, vprescale), vsat_cutoff);
+    const v128_t vzCDEF = wasm_f32x4_max(wasm_f32x4_mul(vxCDEF, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+    const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
+    const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
+    const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
+    const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+    const uint64_t vidx89 = wasm_i64x2_extract_lane(vidx89AB, 0);
+    const uint64_t vidxAB = wasm_i64x2_extract_lane(vidx89AB, 1);
+    const float vl8   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    const float vl9 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)));
+    const float vlA = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    const float vlB = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)));
+    const v128_t vl89AB = wasm_f32x4_make(vl8, vl9, vlA, vlB);
+    const uint64_t vidxCD = wasm_i64x2_extract_lane(vidxCDEF, 0);
+    const uint64_t vidxEF = wasm_i64x2_extract_lane(vidxCDEF, 1);
+    const float vlC   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    const float vlD = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)));
+    const float vlE = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    const float vlF = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)));
+    const v128_t vlCDEF = wasm_f32x4_make(vlC, vlD, vlE, vlF);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt89AB), vc2);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtCDEF), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    y += 16;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x20.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x20.c
new file mode 100644
index 0000000..b234260
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x20.c
@@ -0,0 +1,277 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    v128_t vxGHIJ = wasm_v128_load(x + 16);
+    x += 20;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+    const v128_t vz89AB = wasm_f32x4_max(wasm_f32x4_mul(vx89AB, vprescale), vsat_cutoff);
+    const v128_t vzCDEF = wasm_f32x4_max(wasm_f32x4_mul(vxCDEF, vprescale), vsat_cutoff);
+    const v128_t vzGHIJ = wasm_f32x4_max(wasm_f32x4_mul(vxGHIJ, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+    const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
+    const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
+    const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
+    const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
+    const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
+    const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+    const uint64_t vidx89 = wasm_i64x2_extract_lane(vidx89AB, 0);
+    const uint64_t vidxAB = wasm_i64x2_extract_lane(vidx89AB, 1);
+    const float vl8   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    const float vl9 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)));
+    const float vlA = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    const float vlB = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)));
+    const v128_t vl89AB = wasm_f32x4_make(vl8, vl9, vlA, vlB);
+    const uint64_t vidxCD = wasm_i64x2_extract_lane(vidxCDEF, 0);
+    const uint64_t vidxEF = wasm_i64x2_extract_lane(vidxCDEF, 1);
+    const float vlC   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    const float vlD = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)));
+    const float vlE = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    const float vlF = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)));
+    const v128_t vlCDEF = wasm_f32x4_make(vlC, vlD, vlE, vlF);
+    const uint64_t vidxGH = wasm_i64x2_extract_lane(vidxGHIJ, 0);
+    const uint64_t vidxIJ = wasm_i64x2_extract_lane(vidxGHIJ, 1);
+    const float vlG   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
+    const float vlH = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)));
+    const float vlI = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
+    const float vlJ = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)));
+    const v128_t vlGHIJ = wasm_f32x4_make(vlG, vlH, vlI, vlJ);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
+    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
+    v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt89AB), vc2);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtCDEF), vc2);
+    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtGHIJ), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+    vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
+    vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+    const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+    const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
+    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+    const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    wasm_v128_store(y + 16, vyGHIJ);
+    y += 20;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x24.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x24.c
new file mode 100644
index 0000000..cc9a554
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x24.c
@@ -0,0 +1,303 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    v128_t vxGHIJ = wasm_v128_load(x + 16);
+    v128_t vxKLMN = wasm_v128_load(x + 20);
+    x += 24;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+    const v128_t vz89AB = wasm_f32x4_max(wasm_f32x4_mul(vx89AB, vprescale), vsat_cutoff);
+    const v128_t vzCDEF = wasm_f32x4_max(wasm_f32x4_mul(vxCDEF, vprescale), vsat_cutoff);
+    const v128_t vzGHIJ = wasm_f32x4_max(wasm_f32x4_mul(vxGHIJ, vprescale), vsat_cutoff);
+    const v128_t vzKLMN = wasm_f32x4_max(wasm_f32x4_mul(vxKLMN, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
+    v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+    const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
+    const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
+    const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
+    const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
+    const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
+    const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
+    const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
+    const v128_t venKLMN = wasm_i32x4_shl(vnKLMN, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+    const uint64_t vidx89 = wasm_i64x2_extract_lane(vidx89AB, 0);
+    const uint64_t vidxAB = wasm_i64x2_extract_lane(vidx89AB, 1);
+    const float vl8   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    const float vl9 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)));
+    const float vlA = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    const float vlB = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)));
+    const v128_t vl89AB = wasm_f32x4_make(vl8, vl9, vlA, vlB);
+    const uint64_t vidxCD = wasm_i64x2_extract_lane(vidxCDEF, 0);
+    const uint64_t vidxEF = wasm_i64x2_extract_lane(vidxCDEF, 1);
+    const float vlC   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    const float vlD = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)));
+    const float vlE = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    const float vlF = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)));
+    const v128_t vlCDEF = wasm_f32x4_make(vlC, vlD, vlE, vlF);
+    const uint64_t vidxGH = wasm_i64x2_extract_lane(vidxGHIJ, 0);
+    const uint64_t vidxIJ = wasm_i64x2_extract_lane(vidxGHIJ, 1);
+    const float vlG   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
+    const float vlH = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)));
+    const float vlI = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
+    const float vlJ = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)));
+    const v128_t vlGHIJ = wasm_f32x4_make(vlG, vlH, vlI, vlJ);
+    const uint64_t vidxKL = wasm_i64x2_extract_lane(vidxKLMN, 0);
+    const uint64_t vidxMN = wasm_i64x2_extract_lane(vidxKLMN, 1);
+    const float vlK   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxKL));
+    const float vlL = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxKL >> 32)));
+    const float vlM = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxMN));
+    const float vlN = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxMN >> 32)));
+    const v128_t vlKLMN = wasm_f32x4_make(vlK, vlL, vlM, vlN);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
+    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
+    v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
+    vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
+    v128_t vsKLMN = wasm_i32x4_add(vlKLMN, venKLMN);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_hi), vzKLMN);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt89AB), vc2);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtCDEF), vc2);
+    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtGHIJ), vc2);
+    v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtKLMN), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+    vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
+    vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
+    vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
+    vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
+    vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vtKLMN);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+    const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
+    const v128_t veKLMN = wasm_f32x4_mul(wasm_f32x4_add(vpKLMN, vsKLMN), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+    const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
+    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
+    const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
+    vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+    const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
+    const v128_t vyKLMN = wasm_v128_bitselect(veKLMN, vxKLMN, vsignmKLMN);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    wasm_v128_store(y + 16, vyGHIJ);
+    wasm_v128_store(y + 20, vyKLMN);
+    y += 24;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x4.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x4.c
new file mode 100644
index 0000000..dccd02b
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x4.c
@@ -0,0 +1,128 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x8.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x8.c
new file mode 100644
index 0000000..1902b8f
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-lut16-p3-x8.c
@@ -0,0 +1,199 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    x += 8;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x12.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x12.c
new file mode 100644
index 0000000..da10592
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x12.c
@@ -0,0 +1,193 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    x += 12;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+    const v128_t vz89AB = wasm_f32x4_max(wasm_f32x4_mul(vx89AB, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+    v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    y += 12;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x16.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x16.c
new file mode 100644
index 0000000..657d806
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x16.c
@@ -0,0 +1,213 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    x += 16;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+    const v128_t vz89AB = wasm_f32x4_max(wasm_f32x4_mul(vx89AB, vprescale), vsat_cutoff);
+    const v128_t vzCDEF = wasm_f32x4_max(wasm_f32x4_mul(vxCDEF, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+    v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
+    v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    y += 16;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x20.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x20.c
new file mode 100644
index 0000000..cadf4fd
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x20.c
@@ -0,0 +1,233 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    v128_t vxGHIJ = wasm_v128_load(x + 16);
+    x += 20;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+    const v128_t vz89AB = wasm_f32x4_max(wasm_f32x4_mul(vx89AB, vprescale), vsat_cutoff);
+    const v128_t vzCDEF = wasm_f32x4_max(wasm_f32x4_mul(vxCDEF, vprescale), vsat_cutoff);
+    const v128_t vzGHIJ = wasm_f32x4_max(wasm_f32x4_mul(vxGHIJ, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+    v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
+    v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
+    v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
+    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+    vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
+    vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+    const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+    const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
+    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+    const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    wasm_v128_store(y + 16, vyGHIJ);
+    y += 20;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x24.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x24.c
new file mode 100644
index 0000000..a9bdf4e
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x24.c
@@ -0,0 +1,253 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    v128_t vxGHIJ = wasm_v128_load(x + 16);
+    v128_t vxKLMN = wasm_v128_load(x + 20);
+    x += 24;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+    const v128_t vz89AB = wasm_f32x4_max(wasm_f32x4_mul(vx89AB, vprescale), vsat_cutoff);
+    const v128_t vzCDEF = wasm_f32x4_max(wasm_f32x4_mul(vxCDEF, vprescale), vsat_cutoff);
+    const v128_t vzGHIJ = wasm_f32x4_max(wasm_f32x4_mul(vxGHIJ, vprescale), vsat_cutoff);
+    const v128_t vzKLMN = wasm_f32x4_max(wasm_f32x4_mul(vxKLMN, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
+    v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+    v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
+    v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
+    v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
+    v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
+    vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_hi), vzKLMN);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
+    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
+    v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtKLMN), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+    vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
+    vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
+    vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
+    vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
+    vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vtKLMN);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+    const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
+    const v128_t veKLMN = wasm_f32x4_mul(wasm_f32x4_add(vpKLMN, vsKLMN), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+    const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
+    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
+    const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
+    vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+    const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
+    const v128_t vyKLMN = wasm_v128_bitselect(veKLMN, vxKLMN, vsignmKLMN);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    wasm_v128_store(y + 16, vyGHIJ);
+    wasm_v128_store(y + 20, vyKLMN);
+    y += 24;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x4.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x4.c
new file mode 100644
index 0000000..d18a0b4
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x4.c
@@ -0,0 +1,112 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x8.c b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x8.c
new file mode 100644
index 0000000..904578d
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-arm-rr2-p6-x8.c
@@ -0,0 +1,173 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    x += 8;
+
+    const v128_t vz0123 = wasm_f32x4_max(wasm_f32x4_mul(vx0123, vprescale), vsat_cutoff);
+    const v128_t vz4567 = wasm_f32x4_max(wasm_f32x4_mul(vx4567, vprescale), vsat_cutoff);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x12.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x12.c
new file mode 100644
index 0000000..95afa9a
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x12.c
@@ -0,0 +1,240 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    x += 12;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+    const v128_t vz89AB = wasm_f32x4_mul(vx89AB, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+    const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
+    const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+    const uint64_t vidx89 = wasm_i64x2_extract_lane(vidx89AB, 0);
+    const uint64_t vidxAB = wasm_i64x2_extract_lane(vidx89AB, 1);
+    const float vl8   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    const float vl9 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)));
+    const float vlA = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    const float vlB = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)));
+    const v128_t vl89AB = wasm_f32x4_make(vl8, vl9, vlA, vlB);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    const v128_t vsatm89AB = wasm_f32x4_le(vz89AB, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vs89AB = wasm_v128_andnot(vs89AB, vsatm89AB);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+    vt89AB = wasm_v128_andnot(vt89AB, vsatm89AB);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt89AB), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    y += 12;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x16.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x16.c
new file mode 100644
index 0000000..40cf131
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x16.c
@@ -0,0 +1,269 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    x += 16;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+    const v128_t vz89AB = wasm_f32x4_mul(vx89AB, vprescale);
+    const v128_t vzCDEF = wasm_f32x4_mul(vxCDEF, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+    const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
+    const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
+    const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
+    const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+    const uint64_t vidx89 = wasm_i64x2_extract_lane(vidx89AB, 0);
+    const uint64_t vidxAB = wasm_i64x2_extract_lane(vidx89AB, 1);
+    const float vl8   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    const float vl9 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)));
+    const float vlA = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    const float vlB = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)));
+    const v128_t vl89AB = wasm_f32x4_make(vl8, vl9, vlA, vlB);
+    const uint64_t vidxCD = wasm_i64x2_extract_lane(vidxCDEF, 0);
+    const uint64_t vidxEF = wasm_i64x2_extract_lane(vidxCDEF, 1);
+    const float vlC   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    const float vlD = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)));
+    const float vlE = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    const float vlF = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)));
+    const v128_t vlCDEF = wasm_f32x4_make(vlC, vlD, vlE, vlF);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    const v128_t vsatm89AB = wasm_f32x4_le(vz89AB, vsat_cutoff);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    const v128_t vsatmCDEF = wasm_f32x4_le(vzCDEF, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vs89AB = wasm_v128_andnot(vs89AB, vsatm89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vsCDEF = wasm_v128_andnot(vsCDEF, vsatmCDEF);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+    vt89AB = wasm_v128_andnot(vt89AB, vsatm89AB);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt89AB), vc2);
+    vtCDEF = wasm_v128_andnot(vtCDEF, vsatmCDEF);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtCDEF), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    y += 16;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x20.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x20.c
new file mode 100644
index 0000000..31f07e4
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x20.c
@@ -0,0 +1,298 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    v128_t vxGHIJ = wasm_v128_load(x + 16);
+    x += 20;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+    const v128_t vz89AB = wasm_f32x4_mul(vx89AB, vprescale);
+    const v128_t vzCDEF = wasm_f32x4_mul(vxCDEF, vprescale);
+    const v128_t vzGHIJ = wasm_f32x4_mul(vxGHIJ, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+    const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
+    const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
+    const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
+    const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
+    const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
+    const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+    const uint64_t vidx89 = wasm_i64x2_extract_lane(vidx89AB, 0);
+    const uint64_t vidxAB = wasm_i64x2_extract_lane(vidx89AB, 1);
+    const float vl8   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    const float vl9 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)));
+    const float vlA = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    const float vlB = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)));
+    const v128_t vl89AB = wasm_f32x4_make(vl8, vl9, vlA, vlB);
+    const uint64_t vidxCD = wasm_i64x2_extract_lane(vidxCDEF, 0);
+    const uint64_t vidxEF = wasm_i64x2_extract_lane(vidxCDEF, 1);
+    const float vlC   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    const float vlD = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)));
+    const float vlE = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    const float vlF = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)));
+    const v128_t vlCDEF = wasm_f32x4_make(vlC, vlD, vlE, vlF);
+    const uint64_t vidxGH = wasm_i64x2_extract_lane(vidxGHIJ, 0);
+    const uint64_t vidxIJ = wasm_i64x2_extract_lane(vidxGHIJ, 1);
+    const float vlG   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
+    const float vlH = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)));
+    const float vlI = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
+    const float vlJ = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)));
+    const v128_t vlGHIJ = wasm_f32x4_make(vlG, vlH, vlI, vlJ);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
+    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
+    v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    const v128_t vsatm89AB = wasm_f32x4_le(vz89AB, vsat_cutoff);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    const v128_t vsatmCDEF = wasm_f32x4_le(vzCDEF, vsat_cutoff);
+    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    const v128_t vsatmGHIJ = wasm_f32x4_le(vzGHIJ, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vs89AB = wasm_v128_andnot(vs89AB, vsatm89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vsCDEF = wasm_v128_andnot(vsCDEF, vsatmCDEF);
+    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vsGHIJ = wasm_v128_andnot(vsGHIJ, vsatmGHIJ);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+    vt89AB = wasm_v128_andnot(vt89AB, vsatm89AB);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt89AB), vc2);
+    vtCDEF = wasm_v128_andnot(vtCDEF, vsatmCDEF);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtCDEF), vc2);
+    vtGHIJ = wasm_v128_andnot(vtGHIJ, vsatmGHIJ);
+    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtGHIJ), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+    vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
+    vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+    const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+    const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
+    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+    const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    wasm_v128_store(y + 16, vyGHIJ);
+    y += 20;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x24.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x24.c
new file mode 100644
index 0000000..f727332
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x24.c
@@ -0,0 +1,327 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    v128_t vxGHIJ = wasm_v128_load(x + 16);
+    v128_t vxKLMN = wasm_v128_load(x + 20);
+    x += 24;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+    const v128_t vz89AB = wasm_f32x4_mul(vx89AB, vprescale);
+    const v128_t vzCDEF = wasm_f32x4_mul(vxCDEF, vprescale);
+    const v128_t vzGHIJ = wasm_f32x4_mul(vxGHIJ, vprescale);
+    const v128_t vzKLMN = wasm_f32x4_mul(vxKLMN, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
+    v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+    const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
+    const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
+    const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
+    const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
+    const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
+    const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
+    const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
+    const v128_t venKLMN = wasm_i32x4_shl(vnKLMN, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+    const uint64_t vidx89 = wasm_i64x2_extract_lane(vidx89AB, 0);
+    const uint64_t vidxAB = wasm_i64x2_extract_lane(vidx89AB, 1);
+    const float vl8   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
+    const float vl9 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)));
+    const float vlA = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
+    const float vlB = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)));
+    const v128_t vl89AB = wasm_f32x4_make(vl8, vl9, vlA, vlB);
+    const uint64_t vidxCD = wasm_i64x2_extract_lane(vidxCDEF, 0);
+    const uint64_t vidxEF = wasm_i64x2_extract_lane(vidxCDEF, 1);
+    const float vlC   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
+    const float vlD = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)));
+    const float vlE = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
+    const float vlF = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)));
+    const v128_t vlCDEF = wasm_f32x4_make(vlC, vlD, vlE, vlF);
+    const uint64_t vidxGH = wasm_i64x2_extract_lane(vidxGHIJ, 0);
+    const uint64_t vidxIJ = wasm_i64x2_extract_lane(vidxGHIJ, 1);
+    const float vlG   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
+    const float vlH = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)));
+    const float vlI = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
+    const float vlJ = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)));
+    const v128_t vlGHIJ = wasm_f32x4_make(vlG, vlH, vlI, vlJ);
+    const uint64_t vidxKL = wasm_i64x2_extract_lane(vidxKLMN, 0);
+    const uint64_t vidxMN = wasm_i64x2_extract_lane(vidxKLMN, 1);
+    const float vlK   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxKL));
+    const float vlL = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxKL >> 32)));
+    const float vlM = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxMN));
+    const float vlN = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxMN >> 32)));
+    const v128_t vlKLMN = wasm_f32x4_make(vlK, vlL, vlM, vlN);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
+    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
+    v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
+    vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
+    v128_t vsKLMN = wasm_i32x4_add(vlKLMN, venKLMN);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    const v128_t vsatm89AB = wasm_f32x4_le(vz89AB, vsat_cutoff);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    const v128_t vsatmCDEF = wasm_f32x4_le(vzCDEF, vsat_cutoff);
+    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    const v128_t vsatmGHIJ = wasm_f32x4_le(vzGHIJ, vsat_cutoff);
+    v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_hi), vzKLMN);
+    const v128_t vsatmKLMN = wasm_f32x4_le(vzKLMN, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vs89AB = wasm_v128_andnot(vs89AB, vsatm89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vsCDEF = wasm_v128_andnot(vsCDEF, vsatmCDEF);
+    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vsGHIJ = wasm_v128_andnot(vsGHIJ, vsatmGHIJ);
+    vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
+    vsKLMN = wasm_v128_andnot(vsKLMN, vsatmKLMN);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+    vt89AB = wasm_v128_andnot(vt89AB, vsatm89AB);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt89AB), vc2);
+    vtCDEF = wasm_v128_andnot(vtCDEF, vsatmCDEF);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtCDEF), vc2);
+    vtGHIJ = wasm_v128_andnot(vtGHIJ, vsatmGHIJ);
+    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtGHIJ), vc2);
+    vtKLMN = wasm_v128_andnot(vtKLMN, vsatmKLMN);
+    v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vc3, vtKLMN), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+    vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
+    vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
+    vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
+    vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
+    vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vtKLMN);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+    const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
+    const v128_t veKLMN = wasm_f32x4_mul(wasm_f32x4_add(vpKLMN, vsKLMN), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+    const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
+    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
+    const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
+    vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+    const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
+    const v128_t vyKLMN = wasm_v128_bitselect(veKLMN, vxKLMN, vsignmKLMN);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    wasm_v128_store(y + 16, vyGHIJ);
+    wasm_v128_store(y + 20, vyKLMN);
+    y += 24;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x4.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x4.c
new file mode 100644
index 0000000..0d7211e
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x4.c
@@ -0,0 +1,134 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x8.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x8.c
new file mode 100644
index 0000000..215ca13
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-lut16-p3-x8.c
@@ -0,0 +1,211 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    x += 8;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+
+    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
+    const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
+    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
+    const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
+
+    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
+    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
+    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)));
+    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
+    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
+    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
+    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)));
+    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
+    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)));
+    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt0123), vc2);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt4567), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x12.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x12.c
new file mode 100644
index 0000000..ad46018
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x12.c
@@ -0,0 +1,208 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    x += 12;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+    const v128_t vz89AB = wasm_f32x4_mul(vx89AB, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+    v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    const v128_t vsatm89AB = wasm_f32x4_le(vz89AB, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vs89AB = wasm_v128_andnot(vs89AB, vsatm89AB);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+    vt89AB = wasm_v128_andnot(vt89AB, vsatm89AB);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    y += 12;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x16.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x16.c
new file mode 100644
index 0000000..7732192
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x16.c
@@ -0,0 +1,231 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    x += 16;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+    const v128_t vz89AB = wasm_f32x4_mul(vx89AB, vprescale);
+    const v128_t vzCDEF = wasm_f32x4_mul(vxCDEF, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+    v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
+    v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    const v128_t vsatm89AB = wasm_f32x4_le(vz89AB, vsat_cutoff);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    const v128_t vsatmCDEF = wasm_f32x4_le(vzCDEF, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vs89AB = wasm_v128_andnot(vs89AB, vsatm89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vsCDEF = wasm_v128_andnot(vsCDEF, vsatmCDEF);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+    vt89AB = wasm_v128_andnot(vt89AB, vsatm89AB);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
+    vtCDEF = wasm_v128_andnot(vtCDEF, vsatmCDEF);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    y += 16;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x20.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x20.c
new file mode 100644
index 0000000..2413910
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x20.c
@@ -0,0 +1,254 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    v128_t vxGHIJ = wasm_v128_load(x + 16);
+    x += 20;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+    const v128_t vz89AB = wasm_f32x4_mul(vx89AB, vprescale);
+    const v128_t vzCDEF = wasm_f32x4_mul(vxCDEF, vprescale);
+    const v128_t vzGHIJ = wasm_f32x4_mul(vxGHIJ, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+    v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
+    v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
+    v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    const v128_t vsatm89AB = wasm_f32x4_le(vz89AB, vsat_cutoff);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    const v128_t vsatmCDEF = wasm_f32x4_le(vzCDEF, vsat_cutoff);
+    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    const v128_t vsatmGHIJ = wasm_f32x4_le(vzGHIJ, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vs89AB = wasm_v128_andnot(vs89AB, vsatm89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vsCDEF = wasm_v128_andnot(vsCDEF, vsatmCDEF);
+    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vsGHIJ = wasm_v128_andnot(vsGHIJ, vsatmGHIJ);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+    vt89AB = wasm_v128_andnot(vt89AB, vsatm89AB);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
+    vtCDEF = wasm_v128_andnot(vtCDEF, vsatmCDEF);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
+    vtGHIJ = wasm_v128_andnot(vtGHIJ, vsatmGHIJ);
+    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+    vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
+    vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+    const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+    const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
+    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+    const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    wasm_v128_store(y + 16, vyGHIJ);
+    y += 20;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
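
Editorial note (not part of the patch): every f32-velu variant added in this change computes the same extended ELU, y = beta * x for x >= 0 and y = alpha * (exp(prescale * x) - 1) for x < 0; the generated kernels differ only in how they approximate the exponential and in their batch tile. A minimal plain-C reference with the same prescale/alpha/beta parameterization, using libm's expm1f (the helper name is illustrative, not an XNNPACK API):

#include <math.h>
#include <stddef.h>

/* Reference ELU with the parameterization used by the micro-kernels in this change. */
static void elu_reference_f32(size_t n, const float* x, float* y,
                              float prescale, float alpha, float beta) {
  for (size_t i = 0; i < n; i++) {
    const float vx = x[i];
    /* Negative inputs take the exponential branch; the rest are scaled by beta. */
    y[i] = (vx < 0.0f) ? alpha * expm1f(vx * prescale) : vx * beta;
  }
}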
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x24.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x24.c
new file mode 100644
index 0000000..611e516
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x24.c
@@ -0,0 +1,277 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    v128_t vx89AB = wasm_v128_load(x + 8);
+    v128_t vxCDEF = wasm_v128_load(x + 12);
+    v128_t vxGHIJ = wasm_v128_load(x + 16);
+    v128_t vxKLMN = wasm_v128_load(x + 20);
+    x += 24;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+    const v128_t vz89AB = wasm_f32x4_mul(vx89AB, vprescale);
+    const v128_t vzCDEF = wasm_f32x4_mul(vxCDEF, vprescale);
+    const v128_t vzGHIJ = wasm_f32x4_mul(vxGHIJ, vprescale);
+    const v128_t vzKLMN = wasm_f32x4_mul(vxKLMN, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
+    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
+    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
+    v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+    v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
+    v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
+    v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
+    v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
+    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
+    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
+    vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
+    const v128_t vsatm89AB = wasm_f32x4_le(vz89AB, vsat_cutoff);
+    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
+    const v128_t vsatmCDEF = wasm_f32x4_le(vzCDEF, vsat_cutoff);
+    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
+    const v128_t vsatmGHIJ = wasm_f32x4_le(vzGHIJ, vsat_cutoff);
+    v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_hi), vzKLMN);
+    const v128_t vsatmKLMN = wasm_f32x4_le(vzKLMN, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
+    vs89AB = wasm_v128_andnot(vs89AB, vsatm89AB);
+    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
+    vsCDEF = wasm_v128_andnot(vsCDEF, vsatmCDEF);
+    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
+    vsGHIJ = wasm_v128_andnot(vsGHIJ, vsatmGHIJ);
+    vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
+    vsKLMN = wasm_v128_andnot(vsKLMN, vsatmKLMN);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+    vt89AB = wasm_v128_andnot(vt89AB, vsatm89AB);
+    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
+    vtCDEF = wasm_v128_andnot(vtCDEF, vsatmCDEF);
+    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
+    vtGHIJ = wasm_v128_andnot(vtGHIJ, vsatmGHIJ);
+    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
+    vtKLMN = wasm_v128_andnot(vtKLMN, vsatmKLMN);
+    v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtKLMN), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+    vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
+    vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
+    vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
+    vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
+    vs89AB = wasm_f32x4_sub(vs89AB, vone);
+    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
+    vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
+    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
+    vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
+    vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
+    vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
+    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
+    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
+    vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vtKLMN);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+    const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
+    const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
+    const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
+    const v128_t veKLMN = wasm_f32x4_mul(wasm_f32x4_add(vpKLMN, vsKLMN), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+    const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
+    vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
+    const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
+    vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
+    const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
+    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
+    const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
+    vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+    const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
+    const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
+    const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
+    const v128_t vyKLMN = wasm_v128_bitselect(veKLMN, vxKLMN, vsignmKLMN);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    wasm_v128_store(y + 8, vy89AB);
+    wasm_v128_store(y + 12, vyCDEF);
+    wasm_v128_store(y + 16, vyGHIJ);
+    wasm_v128_store(y + 20, vyKLMN);
+    y += 24;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x4.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x4.c
new file mode 100644
index 0000000..01cd089
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x4.c
@@ -0,0 +1,118 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x8.c b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x8.c
new file mode 100644
index 0000000..e12d649
--- /dev/null
+++ b/src/f32-velu/gen/velu-wasmsimd-x86-rr2-p6-x8.c
@@ -0,0 +1,185 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
+  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+    v128_t vx0123 = wasm_v128_load(x);
+    v128_t vx4567 = wasm_v128_load(x + 4);
+    x += 8;
+
+    const v128_t vz0123 = wasm_f32x4_mul(vx0123, vprescale);
+    const v128_t vz4567 = wasm_f32x4_mul(vx4567, vprescale);
+
+    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
+    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
+
+    v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
+    v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
+
+    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
+    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
+
+    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
+    const v128_t vsatm0123 = wasm_f32x4_le(vz0123, vsat_cutoff);
+    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
+    const v128_t vsatm4567 = wasm_f32x4_le(vz4567, vsat_cutoff);
+
+    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
+    vs0123 = wasm_v128_andnot(vs0123, vsatm0123);
+    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
+    vs4567 = wasm_v128_andnot(vs4567, vsatm4567);
+
+    vt0123 = wasm_v128_andnot(vt0123, vsatm0123);
+    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
+    vt4567 = wasm_v128_andnot(vt4567, vsatm4567);
+    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
+
+    vp0123 = wasm_f32x4_mul(vp0123, vt0123);
+    vp4567 = wasm_f32x4_mul(vp4567, vt4567);
+
+    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
+    vs0123 = wasm_f32x4_sub(vs0123, vone);
+    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
+    vs4567 = wasm_f32x4_sub(vs4567, vone);
+
+    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
+    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
+
+    const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
+    const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
+
+    const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
+    vx0123 = wasm_f32x4_mul(vx0123, vbeta);
+    const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
+    vx4567 = wasm_f32x4_mul(vx4567, vbeta);
+
+    const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
+    const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
+
+    wasm_v128_store(y, vy0123);
+    wasm_v128_store(y + 4, vy4567);
+    y += 8;
+  }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    vs = wasm_v128_andnot(vs, vsatm);
+    vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
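
Editorial note (not part of the patch): the wasmsimd kernels above never issue a floating-point compare to choose between the two ELU branches. wasm_i32x4_shr(vx, 31) arithmetic-shifts the sign bit across each 32-bit lane, giving an all-ones mask for negative inputs and all-zeros otherwise, and wasm_v128_bitselect uses that mask to blend the two results. A single-lane scalar sketch of the same idea (it assumes arithmetic right shift of a negative int32_t, which is what the SIMD instruction does; the helper name is illustrative):

#include <stdint.h>
#include <string.h>

static float select_by_sign(float x, float if_negative, float if_nonnegative) {
  int32_t xbits;
  memcpy(&xbits, &x, sizeof(xbits));
  /* Arithmetic shift smears the sign bit: all-ones for negative x (including -0.0f), zero otherwise. */
  const int32_t mask = xbits >> 31;
  int32_t a, b, ybits;
  memcpy(&a, &if_negative, sizeof(a));
  memcpy(&b, &if_nonnegative, sizeof(b));
  /* Mirrors wasm_v128_bitselect(a, b, mask): take 'a' bits where the mask is set. */
  ybits = (a & mask) | (b & ~mask);
  float y;
  memcpy(&y, &ybits, sizeof(y));
  return y;
}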
diff --git a/src/f32-velu/neon-lut16-p3.c.in b/src/f32-velu/neon-lut16-p3.c.in
new file mode 100644
index 0000000..97118c3
--- /dev/null
+++ b/src/f32-velu/neon-lut16-p3.c.in
@@ -0,0 +1,199 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 4 == 0
+$assert BATCH_TILE >= 4
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__${"neonfma" if FMA else "neon"}_rr${1 if FMA else 2}_lut16_p3_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
+  $if FMA:
+    const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  $else:
+    const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
+    const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  $if BATCH_TILE > 4:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      $for N in range(0, BATCH_TILE, 4):
+        float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;
+
+      $for N in range(0, BATCH_TILE, 4):
+        const float32x4_t vz${ABC[N:N+4]} = vmaxq_f32(vmulq_f32(vx${ABC[N:N+4]}, vprescale), vsat_cutoff);
+
+      $for N in range(0, BATCH_TILE, 4):
+        float32x4_t vn${ABC[N:N+4]} = ${VMULADDQ_F32}(vmagic_bias, vz${ABC[N:N+4]}, vlog2e);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const uint64x2_t vidx${ABC[N:N+4]} = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), vindex_mask), 2));
+        const int32x4_t ven${ABC[N:N+4]} = vshlq_n_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), 19);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const uint64_t vidx${ABC[N:N+2]} = vgetq_lane_u64(vidx${ABC[N:N+4]}, 0);
+        const uint64_t vidx${ABC[N+2:N+4]} = vgetq_lane_u64(vidx${ABC[N:N+4]}, 1);
+        int32x2_t vl${ABC[N:N+2]} = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N:N+2]}));
+        int32x2_t vl${ABC[N+2:N+4]} = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N+2:N+4]}));
+        vl${ABC[N:N+2]} = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N:N+2]} >> 32)), vl${ABC[N:N+2]}, 1);
+        vl${ABC[N+2:N+4]} = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32)), vl${ABC[N+2:N+4]}, 1);
+        const int32x4_t vl${ABC[N:N+4]} = vcombine_s32(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vn${ABC[N:N+4]} = vsubq_f32(vn${ABC[N:N+4]}, vmagic_bias);
+        float32x4_t vs${ABC[N:N+4]} = vreinterpretq_f32_s32(vaddq_s32(vl${ABC[N:N+4]}, ven${ABC[N:N+4]}));
+
+      $if FMA:
+        $for N in range(0, BATCH_TILE, 4):
+          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2);
+      $else:
+        $for N in range(0, BATCH_TILE, 4):
+          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_hi);
+
+        $for N in range(0, BATCH_TILE, 4):
+          vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_lo);
+
+      $for N in range(0, BATCH_TILE, 4):
+        float32x4_t vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc2, vc3, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = vmulq_f32(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = vmulq_f32(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
+        vs${ABC[N:N+4]} = vsubq_f32(vs${ABC[N:N+4]}, vone);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        const float32x4_t ve${ABC[N:N+4]} = vmulq_f32(vaddq_f32(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const uint32x4_t vm${ABC[N:N+4]} = vcltq_f32(vx${ABC[N:N+4]}, vmovq_n_f32(0.0f));
+        vx${ABC[N:N+4]} = vmulq_f32(vx${ABC[N:N+4]}, vbeta);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const float32x4_t vy${ABC[N:N+4]} = vbslq_f32(vm${ABC[N:N+4]}, ve${ABC[N:N+4]}, vx${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vst1q_f32(y, vy${ABC[N:N+4]}); y += 4;
+    }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    $if FMA:
+      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2);
+    $else:
+      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2_hi);
+      vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = ${VMULADDQ_F32}(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = ${VMULADDQ_F32}(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vlog2e);
+    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
+    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
+
+    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
+    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
+    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
+    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
+
+    vn = vsubq_f32(vn, vmagic_bias);
+    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
+
+    $if FMA:
+      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2);
+    $else:
+      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2_hi);
+      vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_lo);
+    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
+
+    float32x4_t vp = ${VMULADDQ_F32}(vc2, vc3, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = ${VMULADDQ_F32}(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/neon-p6.c.in b/src/f32-velu/neon-p6.c.in
new file mode 100644
index 0000000..4bb5325
--- /dev/null
+++ b/src/f32-velu/neon-p6.c.in
@@ -0,0 +1,179 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 4 == 0
+$assert BATCH_TILE >= 4
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__${"neonfma" if FMA else "neon"}_rr${1 if FMA else 2}_p6_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
+  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
+  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);
+
+  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
+  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
+  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
+  $if FMA:
+    const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
+  $else:
+    const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
+    const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
+  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
+  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
+  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
+  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
+  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
+  const float32x4_t vone = vmovq_n_f32(1.0f);
+
+  $if BATCH_TILE > 4:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      $for N in range(0, BATCH_TILE, 4):
+        float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;
+
+      $for N in range(0, BATCH_TILE, 4):
+        const float32x4_t vz${ABC[N:N+4]} = vmaxq_f32(vmulq_f32(vx${ABC[N:N+4]}, vprescale), vsat_cutoff);
+
+      $for N in range(0, BATCH_TILE, 4):
+        float32x4_t vn${ABC[N:N+4]} = ${VMULADDQ_F32}(vmagic_bias, vz${ABC[N:N+4]}, vlog2e);
+
+      $for N in range(0, BATCH_TILE, 4):
+        float32x4_t vs${ABC[N:N+4]} = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), 23));
+        vn${ABC[N:N+4]} = vsubq_f32(vn${ABC[N:N+4]}, vmagic_bias);
+
+      $if FMA:
+        $for N in range(0, BATCH_TILE, 4):
+          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2);
+      $else:
+        $for N in range(0, BATCH_TILE, 4):
+          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_hi);
+
+        $for N in range(0, BATCH_TILE, 4):
+          vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_lo);
+
+      $for N in range(0, BATCH_TILE, 4):
+        float32x4_t vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc5, vc6, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc4, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc3, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc2, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = vmulq_f32(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = vmulq_f32(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
+        vs${ABC[N:N+4]} = vsubq_f32(vs${ABC[N:N+4]}, vone);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        const float32x4_t ve${ABC[N:N+4]} = vmulq_f32(vaddq_f32(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const uint32x4_t vm${ABC[N:N+4]} = vcltq_f32(vx${ABC[N:N+4]}, vmovq_n_f32(0.0f));
+        vx${ABC[N:N+4]} = vmulq_f32(vx${ABC[N:N+4]}, vbeta);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const float32x4_t vy${ABC[N:N+4]} = vbslq_f32(vm${ABC[N:N+4]}, ve${ABC[N:N+4]}, vx${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vst1q_f32(y, vy${ABC[N:N+4]}); y += 4;
+    }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    float32x4_t vx = vld1q_f32(x); x += 4;
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    $if FMA:
+      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2);
+    $else:
+      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2_hi);
+      vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = ${VMULADDQ_F32}(vc5, vc6, vt);
+    vp = ${VMULADDQ_F32}(vc4, vp, vt);
+    vp = ${VMULADDQ_F32}(vc3, vp, vt);
+    vp = ${VMULADDQ_F32}(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = ${VMULADDQ_F32}(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    vst1q_f32(y, vy); y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    float32x4_t vx = vld1q_f32(x);
+
+    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
+
+    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vlog2e);
+    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
+    vn = vsubq_f32(vn, vmagic_bias);
+
+    $if FMA:
+      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2);
+    $else:
+      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2_hi);
+      vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_lo);
+
+    float32x4_t vp = ${VMULADDQ_F32}(vc5, vc6, vt);
+    vp = ${VMULADDQ_F32}(vc4, vp, vt);
+    vp = ${VMULADDQ_F32}(vc3, vp, vt);
+    vp = ${VMULADDQ_F32}(vc2, vp, vt);
+    vp = vmulq_f32(vp, vt);
+
+    vt = vmulq_f32(vt, vs);
+    vs = vsubq_f32(vs, vone);
+    vp = ${VMULADDQ_F32}(vt, vp, vt);
+    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
+
+    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
+    vx = vmulq_f32(vx, vbeta);
+    const float32x4_t vy = vbslq_f32(vm, ve, vx);
+
+    float32x2_t vy_lo = vget_low_f32(vy);
+    if (n & (2 * sizeof(float))) {
+      vst1_f32(y, vy_lo); y += 2;
+      vy_lo = vget_high_f32(vy);
+    }
+    if (n & (1 * sizeof(float))) {
+      vst1_lane_f32(y, vy_lo, 0);
+    }
+  }
+}
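
Editorial note (not part of the patch): the .c.in files added here are xngen templates, not C. Lines starting with $ are template directives and ${...} is substituted per generated variant; the pre-expanded outputs live under src/f32-velu/gen/ elsewhere in this change. As a hand-computed illustration of the expansion (not taken from generator output): with BATCH_TILE=8 and FMA disabled, the first polynomial stanza of neon-p6.c.in becomes

    float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);

inside a function named xnn_f32_velu_ukernel__neon_rr2_p6_x8, while FMA=1 substitutes vfmaq_f32 and produces the neonfma_rr1 variant of the name.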
diff --git a/src/f32-velu/scalar-rr2-lut16-p3.c.in b/src/f32-velu/scalar-rr2-lut16-p3.c.in
new file mode 100644
index 0000000..b0bc7ea
--- /dev/null
+++ b/src/f32-velu/scalar-rr2-lut16-p3.c.in
@@ -0,0 +1,240 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE >= 1
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__${"wasm" if WASM else "scalar"}_rr2_lut16_p3_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.800000p19f;
+  const float vlog2e = 0x1.715476p+0f;
+  const uint32_t vindex_mask = UINT32_C(0xF);
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E400p-1f;
+  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f;
+  const float vc3 = 0x1.55561Cp-3f;
+  const float vc2 = 0x1.0001ECp-1f;
+  const float vone = 1.0f;
+
+  $if BATCH_TILE > 1:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      $for N in range(BATCH_TILE):
+        float vx${N} = x[${N}];
+      x += ${BATCH_TILE};
+
+      $for N in range(BATCH_TILE):
+        $if WASM:
+          const float vz${N} = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx${N} * vprescale, vsat_cutoff), 0.0f);
+        $else:
+          const float vz${N} = vx${N} * vprescale;
+
+      $for N in range(BATCH_TILE):
+        float vn${N} = vz${N} * vlog2e + vmagic_bias;
+
+      $for N in range(BATCH_TILE):
+        const uint32_t ven${N} = fp32_to_bits(vn${N}) << 19;
+        const uint32_t vidx${N} = fp32_to_bits(vn${N}) & vindex_mask;
+        vn${N} -= vmagic_bias;
+
+      $for N in range(BATCH_TILE):
+        float vt${N} = vn${N} * vminus_ln2_hi + vz${N};
+        float vs${N} = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx${N}] + ven${N});
+
+      $for N in range(BATCH_TILE):
+        vt${N} = vn${N} * vminus_ln2_lo + vt${N};
+        $if not WASM:
+          if XNN_UNPREDICTABLE(vz${N} <= vsat_cutoff) {
+            vs${N} = 0.0f;
+            vt${N} = 0.0f;
+          }
+
+      $for N in range(BATCH_TILE):
+        float vp${N} = vc3 * vt${N} + vc2;
+
+      $for N in range(BATCH_TILE):
+        vp${N} *= vt${N};
+
+      $for N in range(BATCH_TILE):
+        vt${N} *= vs${N};
+        vs${N} -= vone;
+
+      $for N in range(BATCH_TILE):
+        vp${N} = vp${N} * vt${N} + vt${N};
+
+      $for N in range(BATCH_TILE):
+        const float ve${N} = (vp${N} + vs${N}) * valpha;
+        $if WASM:
+          float vy${N} = __builtin_wasm_max_f32(vx${N} * vbeta, 0.0f);
+        $else:
+          float vy${N} = vx${N} * vbeta;
+
+      $if WASM:
+        $for N in range(BATCH_TILE):
+          vy${N} += __builtin_wasm_min_f32(ve${N}, 0.0f);
+      $else:
+        $for N in range(BATCH_TILE):
+          if XNN_UNPREDICTABLE(vx${N} < 0.0f) {
+            vy${N} = ve${N};
+          }
+
+      $for N in range(BATCH_TILE):
+        y[${N}] = vy${N};
+      y += ${BATCH_TILE};
+    }
+  $if BATCH_TILE == 1:
+    do {
+      float vx = *x++;
+
+      $if WASM:
+        const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+      $else:
+        const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+      $if not WASM:
+        if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+          vs = 0.0f;
+          vt = 0.0f;
+        }
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      $if WASM:
+        float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+        vy += __builtin_wasm_min_f32(ve, 0.0f);
+      $else:
+        float vy = vx * vbeta;
+        if XNN_UNPREDICTABLE(vx < 0.0f) {
+          vy = ve;
+        }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  $elif BATCH_TILE == 2:
+    if XNN_UNLIKELY(n != 0) {
+      float vx = *x;
+
+      $if WASM:
+        const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+      $else:
+        const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      const uint32_t ven = fp32_to_bits(vn) << 19;
+      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+      vt = vn * vminus_ln2_lo + vt;
+      $if not WASM:
+        if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+          vs = 0.0f;
+          vt = 0.0f;
+        }
+
+      float vp = vc3 * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      $if WASM:
+        float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+        vy += __builtin_wasm_min_f32(ve, 0.0f);
+      $else:
+        float vy = vx * vbeta;
+        if XNN_UNPREDICTABLE(vx < 0.0f) {
+          vy = ve;
+        }
+
+      *y = vy;
+    }
+  $else:
+    if XNN_UNLIKELY(n != 0) {
+      do {
+        float vx = *x++;
+
+        $if WASM:
+          const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+        $else:
+          const float vz = vx * vprescale;
+
+        float vn = vz * vlog2e + vmagic_bias;
+        const uint32_t ven = fp32_to_bits(vn) << 19;
+        const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
+        vn -= vmagic_bias;
+
+        float vt = vn * vminus_ln2_hi + vz;
+        float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
+
+        vt = vn * vminus_ln2_lo + vt;
+        $if not WASM:
+          if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+            vs = 0.0f;
+            vt = 0.0f;
+          }
+
+        float vp = vc3 * vt + vc2;
+        vp *= vt;
+
+        vt *= vs;
+        vs -= vone;
+        vp = vp * vt + vt;
+        const float ve = (vp + vs) * valpha;
+
+        $if WASM:
+          float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+          vy += __builtin_wasm_min_f32(ve, 0.0f);
+        $else:
+          float vy = vx * vbeta;
+          if XNN_UNPREDICTABLE(vx < 0.0f) {
+            vy = ve;
+          }
+
+        *y++ = vy;
+
+        n -= sizeof(float);
+      } while (n != 0);
+    }
+}
diff --git a/src/f32-velu/scalar-rr2-p6.c.in b/src/f32-velu/scalar-rr2-p6.c.in
new file mode 100644
index 0000000..22eaef9
--- /dev/null
+++ b/src/f32-velu/scalar-rr2-p6.c.in
@@ -0,0 +1,252 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE >= 1
+#include <assert.h>
+#include <math.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vunary.h>
+
+#include <fp16/bitcasts.h>
+
+
+void xnn_f32_velu_ukernel__${"wasm" if WASM else "scalar"}_rr2_p6_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+  assert(n % sizeof(float) == 0);
+
+  const float vprescale = params->scalar.prescale;
+  const float valpha = params->scalar.alpha;
+  const float vbeta = params->scalar.beta;
+
+  const float vmagic_bias = 0x1.8000FEp23f;
+  const float vlog2e = 0x1.715476p+0f;
+  const float vsat_cutoff = -0x1.154246p+4f;
+  const float vminus_ln2_hi = -0x1.62E440p-1f;
+  const float vminus_ln2_lo = 0x1.0105C6p-21f;
+  const float vc6 = 0x1.6b7338p-10f;
+  const float vc5 = 0x1.12278Ep-7f;
+  const float vc4 = 0x1.555716p-5f;
+  const float vc3 = 0x1.5554B0p-3f;
+  const float vc2 = 0x1.FFFFFEp-2f;
+  const float vone = 1.0f;
+
+  $if BATCH_TILE > 1:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      $for N in range(BATCH_TILE):
+        float vx${N} = x[${N}];
+      x += ${BATCH_TILE};
+
+      $for N in range(BATCH_TILE):
+        $if WASM:
+          const float vz${N} = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx${N} * vprescale, vsat_cutoff), 0.0f);
+        $else:
+          const float vz${N} = vx${N} * vprescale;
+
+      $for N in range(BATCH_TILE):
+        float vn${N} = vz${N} * vlog2e + vmagic_bias;
+
+      $for N in range(BATCH_TILE):
+        float vs${N} = fp32_from_bits(fp32_to_bits(vn${N}) << 23);
+        vn${N} -= vmagic_bias;
+
+      $for N in range(BATCH_TILE):
+        float vt${N} = vn${N} * vminus_ln2_hi + vz${N};
+
+      $for N in range(BATCH_TILE):
+        vt${N} = vn${N} * vminus_ln2_lo + vt${N};
+
+      $if not WASM:
+        $for N in range(BATCH_TILE):
+          if XNN_UNPREDICTABLE(vz${N} <= vsat_cutoff) {
+            vs${N} = 0.0f;
+            vt${N} = 0.0f;
+          }
+
+      $for N in range(BATCH_TILE):
+        float vp${N} = vc6 * vt${N} + vc5;
+
+      $for N in range(BATCH_TILE):
+        vp${N} = vp${N} * vt${N} + vc4;
+
+      $for N in range(BATCH_TILE):
+        vp${N} = vp${N} * vt${N} + vc3;
+
+      $for N in range(BATCH_TILE):
+        vp${N} = vp${N} * vt${N} + vc2;
+
+      $for N in range(BATCH_TILE):
+        vp${N} *= vt${N};
+
+      $for N in range(BATCH_TILE):
+        vt${N} *= vs${N};
+        vs${N} -= vone;
+
+      $for N in range(BATCH_TILE):
+        vp${N} = vp${N} * vt${N} + vt${N};
+
+      $for N in range(BATCH_TILE):
+        const float ve${N} = (vp${N} + vs${N}) * valpha;
+        $if WASM:
+          float vy${N} = __builtin_wasm_max_f32(vx${N} * vbeta, 0.0f);
+        $else:
+          float vy${N} = vx${N} * vbeta;
+
+      $if WASM:
+        $for N in range(BATCH_TILE):
+          vy${N} += __builtin_wasm_min_f32(ve${N}, 0.0f);
+      $else:
+        $for N in range(BATCH_TILE):
+          if XNN_UNPREDICTABLE(vx${N} < 0.0f) {
+            vy${N} = ve${N};
+          }
+
+      $for N in range(BATCH_TILE):
+        y[${N}] = vy${N};
+      y += ${BATCH_TILE};
+    }
+  $if BATCH_TILE == 1:
+    do {
+      float vx = *x++;
+
+      $if WASM:
+        const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+      $else:
+        const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+      $if not WASM:
+        if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+          vs = 0.0f;
+          vt = 0.0f;
+        }
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      $if WASM:
+        float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+        vy += __builtin_wasm_min_f32(ve, 0.0f);
+      $else:
+        float vy = vx * vbeta;
+        if XNN_UNPREDICTABLE(vx < 0.0f) {
+          vy = ve;
+        }
+
+      *y++ = vy;
+
+      n -= sizeof(float);
+    } while (n != 0);
+  $elif BATCH_TILE == 2:
+    if XNN_UNLIKELY(n != 0) {
+      float vx = *x;
+
+      $if WASM:
+        const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+      $else:
+        const float vz = vx * vprescale;
+
+      float vn = vz * vlog2e + vmagic_bias;
+      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+      vn -= vmagic_bias;
+
+      float vt = vn * vminus_ln2_hi + vz;
+      vt = vn * vminus_ln2_lo + vt;
+
+      $if not WASM:
+        if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+          vs = 0.0f;
+          vt = 0.0f;
+        }
+
+      float vp = vc6 * vt + vc5;
+      vp = vp * vt + vc4;
+      vp = vp * vt + vc3;
+      vp = vp * vt + vc2;
+      vp *= vt;
+
+      vt *= vs;
+      vs -= vone;
+      vp = vp * vt + vt;
+      const float ve = (vp + vs) * valpha;
+
+      $if WASM:
+        float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+        vy += __builtin_wasm_min_f32(ve, 0.0f);
+      $else:
+        float vy = vx * vbeta;
+        if XNN_UNPREDICTABLE(vx < 0.0f) {
+          vy = ve;
+        }
+
+      *y = vy;
+    }
+  $else:
+    if XNN_UNLIKELY(n != 0) {
+      do {
+        float vx = *x++;
+
+        $if WASM:
+          const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
+        $else:
+          const float vz = vx * vprescale;
+
+        float vn = vz * vlog2e + vmagic_bias;
+        float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
+        vn -= vmagic_bias;
+
+        float vt = vn * vminus_ln2_hi + vz;
+        vt = vn * vminus_ln2_lo + vt;
+
+        $if not WASM:
+          if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
+            vs = 0.0f;
+            vt = 0.0f;
+          }
+
+        float vp = vc6 * vt + vc5;
+        vp = vp * vt + vc4;
+        vp = vp * vt + vc3;
+        vp = vp * vt + vc2;
+        vp *= vt;
+
+        vt *= vs;
+        vs -= vone;
+        vp = vp * vt + vt;
+        const float ve = (vp + vs) * valpha;
+
+        $if WASM:
+          float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
+          vy += __builtin_wasm_min_f32(ve, 0.0f);
+        $else:
+          float vy = vx * vbeta;
+          if XNN_UNPREDICTABLE(vx < 0.0f) {
+            vy = ve;
+          }
+
+        *y++ = vy;
+
+        n -= sizeof(float);
+      } while (n != 0);
+    }
+}
diff --git a/src/f32-velu/sse-rr2-lut16-p3.c.in b/src/f32-velu/sse-rr2-lut16-p3.c.in
new file mode 100644
index 0000000..996f4cc
--- /dev/null
+++ b/src/f32-velu/sse-rr2-lut16-p3.c.in
@@ -0,0 +1,287 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 4 == 0
+$assert BATCH_TILE >= 4
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
+#include <assert.h>
+
+#include <${SSE_HEADER}>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+$ISA = {2: "sse2", 4: "sse41"}[SSE]
+void xnn_f32_velu_ukernel__${ISA}_rr2_lut16_p3_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128i vindex_mask = _mm_set1_epi32(0xF);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
+  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
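+  // All loops below evaluate ELU(x) = beta * x for x >= 0 and
+  // ELU(x) = alpha * (exp(prescale * x) - 1) for x < 0.
+  // exp(z) is computed as s * exp(t): n approximates z / ln(2) in multiples of
+  // 1/16 (extracted with the magic-bias trick), t = z - n * ln(2) is recovered
+  // with a two-term (hi/lo) Cody-Waite reduction, and the scale s = 2**n is
+  // reconstructed by combining the 16-entry table lookup with the integer bits
+  // of n shifted into the exponent field. exp(t) is then approximated by a
+  // degree-3 polynomial. z is clamped at vsat_cutoff, where ELU(x) ~ -alpha.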
+  $if BATCH_TILE > 4:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
+      $for N in range(4, BATCH_TILE, 4):
+        __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
+      x += ${BATCH_TILE};
+
+      $for N in range(0, BATCH_TILE, 4):
+        const __m128 vz${ABC[N:N+4]} = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx${ABC[N:N+4]}, vprescale));
+
+      $for N in range(0, BATCH_TILE, 4):
+        __m128 vn${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const __m128i vidx${ABC[N:N+4]} = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn${ABC[N:N+4]}), vindex_mask), 2);
+        const __m128i ven${ABC[N:N+4]} = _mm_slli_epi32(_mm_castps_si128(vn${ABC[N:N+4]}), 19);
+
+      #if XNN_ARCH_X86_64
+        $for N in range(0, BATCH_TILE, 4):
+          const uint64_t vidx${ABC[N:N+2]} = (uint64_t) _mm_cvtsi128_si64(vidx${ABC[N:N+4]});
+          $if SSE >= 4:
+            const uint64_t vidx${ABC[N+2:N+4]} = (uint64_t) _mm_extract_epi64(vidx${ABC[N:N+4]}, 1);
+          $else:
+            const uint64_t vidx${ABC[N+2:N+4]} = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx${ABC[N:N+4]}, vidx${ABC[N:N+4]}));
+          const __m128i vl${ABC[N]}   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N:N+2]})));
+          const __m128i vl${ABC[N+2]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N+2:N+4]})));
+          $if SSE >= 4:
+            const __m128i vl${ABC[N:N+2]} = _mm_insert_epi32(vl${ABC[N]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N:N+2]} >> 32))), 1);
+          $else:
+            const __m128i vl${ABC[N+1]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N:N+2]} >> 32))));
+            const __m128i vl${ABC[N:N+2]} = _mm_unpacklo_epi32(vl${ABC[N]}, vl${ABC[N+1]});
+          $if SSE >= 4:
+            const __m128i vl${ABC[N+2:N+4]} = _mm_insert_epi32(vl${ABC[N+2]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32))), 1);
+          $else:
+            const __m128i vl${ABC[N+3]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32))));
+            const __m128i vl${ABC[N+2:N+4]} = _mm_unpacklo_epi32(vl${ABC[N+2]}, vl${ABC[N+3]});
+          const __m128i vl${ABC[N:N+4]} = _mm_unpacklo_epi64(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});
+      #else  // !XNN_ARCH_X86_64
+        $for N in range(0, BATCH_TILE, 4):
+          const uint32_t vidx${ABC[N]} = (uint32_t) _mm_cvtsi128_si32(vidx${ABC[N:N+4]});
+          const uint32_t vidx${ABC[N+1]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 2);
+          const uint32_t vidx${ABC[N+2]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 4);
+          const uint32_t vidx${ABC[N+3]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 6);
+          const __m128i vl${ABC[N]}   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N]})));
+          const __m128i vl${ABC[N+2]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+2]})));
+          $if SSE >= 4:
+            const __m128i vl${ABC[N:N+2]} = _mm_insert_epi32(vl${ABC[N]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+1]})), 1);
+          $else:
+            const __m128i vl${ABC[N+1]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+1]})));
+            const __m128i vl${ABC[N:N+2]} = _mm_unpacklo_epi32(vl${ABC[N]}, vl${ABC[N+1]});
+          $if SSE >= 4:
+            const __m128i vl${ABC[N+2:N+4]} = _mm_insert_epi32(vl${ABC[N+2]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+3]})), 1);
+          $else:
+            const __m128i vl${ABC[N+3]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+3]})));
+            const __m128i vl${ABC[N+2:N+4]} = _mm_unpacklo_epi32(vl${ABC[N+2]}, vl${ABC[N+3]});
+          const __m128i vl${ABC[N:N+4]} = _mm_unpacklo_epi64(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});
+      #endif  // XNN_ARCH_X86_64
+
+      $for N in range(0, BATCH_TILE, 4):
+        vn${ABC[N:N+4]} = _mm_sub_ps(vn${ABC[N:N+4]}, vmagic_bias);
+        __m128 vs${ABC[N:N+4]} = _mm_castsi128_ps(_mm_add_epi32(vl${ABC[N:N+4]}, ven${ABC[N:N+4]}));
+
+      $for N in range(0, BATCH_TILE, 4):
+        __m128 vt${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vn${ABC[N:N+4]}, vminus_ln2_hi), vz${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vn${ABC[N:N+4]}, vminus_ln2_lo), vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        __m128 vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vc3, vt${ABC[N:N+4]}), vc2);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = _mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = _mm_mul_ps(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
+        vs${ABC[N:N+4]} = _mm_sub_ps(vs${ABC[N:N+4]}, vone);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        const __m128 ve${ABC[N:N+4]} = _mm_mul_ps(_mm_add_ps(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);
+
+      $for N in range(0, BATCH_TILE, 4):
+        $if SSE < 4:
+          const __m128 vm${ABC[N:N+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx${ABC[N:N+4]})));
+        vx${ABC[N:N+4]} = _mm_mul_ps(vx${ABC[N:N+4]}, vbeta);
+
+      $for N in range(0, BATCH_TILE, 4):
+        $if SSE >= 4:
+          const __m128 vy${ABC[N:N+4]} = _mm_blendv_ps(vx${ABC[N:N+4]}, ve${ABC[N:N+4]}, vx${ABC[N:N+4]});
+        $else:
+          const __m128 vy${ABC[N:N+4]} = _mm_or_ps(_mm_and_ps(ve${ABC[N:N+4]}, vm${ABC[N:N+4]}), _mm_andnot_ps(vm${ABC[N:N+4]}, vx${ABC[N:N+4]}));
+
+      _mm_storeu_ps(y, vy${ABC[0:4]});
+      $for N in range(4, BATCH_TILE, 4):
+        _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      $if SSE >= 4:
+        const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      $else:
+        const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      $if SSE >= 4:
+        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      $else:
+        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      $if SSE >= 4:
+        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+      $else:
+        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      $if SSE >= 4:
+        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      $else:
+        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      $if SSE >= 4:
+        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+      $else:
+        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    $if SSE < 4:
+      const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    $if SSE >= 4:
+      const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+    $else:
+      const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+
+    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
+    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
+    #if XNN_ARCH_X86_64
+      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
+      $if SSE >= 4:
+        const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
+      $else:
+        const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
+      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
+      $if SSE >= 4:
+        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
+      $else:
+        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
+        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      $if SSE >= 4:
+        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
+      $else:
+        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
+        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #else  // !XNN_ARCH_X86_64
+      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
+      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
+      $if SSE >= 4:
+        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
+      $else:
+        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
+        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
+      $if SSE >= 4:
+        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
+      $else:
+        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
+        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
+    #endif  // XNN_ARCH_X86_64
+    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
+    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    $if SSE < 4:
+      const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    $if SSE >= 4:
+      __m128 vy = _mm_blendv_ps(vx, ve, vx);
+    $else:
+      __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/sse-rr2-p6.c.in b/src/f32-velu/sse-rr2-p6.c.in
new file mode 100644
index 0000000..9562a16
--- /dev/null
+++ b/src/f32-velu/sse-rr2-p6.c.in
@@ -0,0 +1,187 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 4 == 0
+$assert BATCH_TILE >= 4
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
+#include <assert.h>
+
+#include <${SSE_HEADER}>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+$ISA = {2: "sse2", 4: "sse41"}[SSE]
+void xnn_f32_velu_ukernel__${ISA}_rr2_p6_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
+  const __m128 valpha = _mm_load_ps(params->sse.alpha);
+  const __m128 vbeta = _mm_load_ps(params->sse.beta);
+
+  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
+  const __m128 vmagic_bias = _mm_set1_ps(0x1.8000FEp23f);
+  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
+  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E440p-1f);
+  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.0105C6p-21f);
+  const __m128 vc6 = _mm_set1_ps(0x1.6b7338p-10f);
+  const __m128 vc5 = _mm_set1_ps(0x1.12278Ep-7f);
+  const __m128 vc4 = _mm_set1_ps(0x1.555716p-5f);
+  const __m128 vc3 = _mm_set1_ps(0x1.5554B0p-3f);
+  const __m128 vc2 = _mm_set1_ps(0x1.FFFFFEp-2f);
+  const __m128 vone = _mm_set1_ps(1.0f);
+
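+  // All loops below evaluate ELU(x) = beta * x for x >= 0 and
+  // ELU(x) = alpha * (exp(prescale * x) - 1) for x < 0.
+  // exp(z) is computed as 2**n * exp(t): the integer n ~ z / ln(2) is obtained
+  // with the magic-bias trick and shifted into the exponent field to form the
+  // scale s = 2**n, t = z - n * ln(2) is recovered with a two-term (hi/lo)
+  // Cody-Waite reduction, and exp(t) is approximated by a degree-6 polynomial.
+  // z is clamped at vsat_cutoff, where ELU(x) ~ -alpha.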
+  $if BATCH_TILE > 4:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
+      $for N in range(4, BATCH_TILE, 4):
+        __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
+      x += ${BATCH_TILE};
+
+      $for N in range(0, BATCH_TILE, 4):
+        const __m128 vz${ABC[N:N+4]} = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx${ABC[N:N+4]}, vprescale));
+
+      $for N in range(0, BATCH_TILE, 4):
+        __m128 vn${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);
+
+      $for N in range(0, BATCH_TILE, 4):
+        __m128 vs${ABC[N:N+4]} = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn${ABC[N:N+4]}), 23));
+
+      $for N in range(0, BATCH_TILE, 4):
+        vn${ABC[N:N+4]} = _mm_sub_ps(vn${ABC[N:N+4]}, vmagic_bias);
+
+      $for N in range(0, BATCH_TILE, 4):
+        __m128 vt${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vn${ABC[N:N+4]}, vminus_ln2_hi), vz${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vn${ABC[N:N+4]}, vminus_ln2_lo), vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        __m128 vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vc6, vt${ABC[N:N+4]}), vc5);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc4);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc3);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc2);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = _mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = _mm_mul_ps(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
+        vs${ABC[N:N+4]} = _mm_sub_ps(vs${ABC[N:N+4]}, vone);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        const __m128 ve${ABC[N:N+4]} = _mm_mul_ps(_mm_add_ps(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);
+
+      $for N in range(0, BATCH_TILE, 4):
+        $if SSE < 4:
+          const __m128 vm${ABC[N:N+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx${ABC[N:N+4]})));
+        vx${ABC[N:N+4]} = _mm_mul_ps(vx${ABC[N:N+4]}, vbeta);
+
+      $for N in range(0, BATCH_TILE, 4):
+        $if SSE >= 4:
+          const __m128 vy${ABC[N:N+4]} = _mm_blendv_ps(vx${ABC[N:N+4]}, ve${ABC[N:N+4]}, vx${ABC[N:N+4]});
+        $else:
+          const __m128 vy${ABC[N:N+4]} = _mm_or_ps(_mm_and_ps(ve${ABC[N:N+4]}, vm${ABC[N:N+4]}), _mm_andnot_ps(vm${ABC[N:N+4]}, vx${ABC[N:N+4]}));
+
+      _mm_storeu_ps(y, vy${ABC[0:4]});
+      $for N in range(4, BATCH_TILE, 4):
+        _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    __m128 vx = _mm_loadu_ps(x);
+    x += 4;
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    $if SSE < 4:
+      const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    $if SSE >= 4:
+      const __m128 vy = _mm_blendv_ps(vx, ve, vx);
+    $else:
+      const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    _mm_storeu_ps(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    __m128 vx = _mm_loadu_ps(x);
+
+    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
+
+    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
+    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
+    vn = _mm_sub_ps(vn, vmagic_bias);
+
+    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
+    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
+
+    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
+    vp = _mm_mul_ps(vp, vt);
+
+    vt = _mm_mul_ps(vt, vs);
+    vs = _mm_sub_ps(vs, vone);
+    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
+    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
+
+    $if SSE < 4:
+      const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
+    vx = _mm_mul_ps(vx, vbeta);
+    $if SSE >= 4:
+      __m128 vy = _mm_blendv_ps(vx, ve, vx);
+    $else:
+      __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
+
+    if (n & (2 * sizeof(float))) {
+      _mm_storel_pi((__m64*) y, vy);
+      vy = _mm_movehl_ps(vy, vy);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      _mm_store_ss(y, vy);
+    }
+  }
+}
diff --git a/src/f32-velu/wasmsimd-rr2-lut16-p3.c.in b/src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
new file mode 100644
index 0000000..8ae13ae
--- /dev/null
+++ b/src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
@@ -0,0 +1,216 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 4 == 0
+$assert BATCH_TILE >= 4
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
+
+void xnn_f32_velu_ukernel__wasmsimd_${"x86" if X86 else "arm"}_rr2_lut16_p3_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p19f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vindex_mask = wasm_i32x4_splat(0xF);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E400p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(-0x1.7F7D1Cp-20f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.55561Cp-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.0001ECp-1f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
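+  // All loops below evaluate ELU(x) = beta * x for x >= 0 and
+  // ELU(x) = alpha * (exp(prescale * x) - 1) for x < 0.
+  // exp(z) is computed as s * exp(t): n approximates z / ln(2) in multiples of
+  // 1/16 (extracted with the magic-bias trick), t = z - n * ln(2) is recovered
+  // with a two-term (hi/lo) Cody-Waite reduction, the scale s = 2**n is
+  // reconstructed by combining the 16-entry table lookup with the integer bits
+  // of n shifted into the exponent field, and exp(t) is approximated by a
+  // degree-3 polynomial. Saturation below vsat_cutoff (where ELU(x) ~ -alpha)
+  // is handled by clamping z on the ARM variant and by masking s and t to zero
+  // on the x86 variant.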
+  $if BATCH_TILE > 4:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      v128_t vx${ABC[0:4]} = wasm_v128_load(x);
+      $for N in range(4, BATCH_TILE, 4):
+        v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
+      x += ${BATCH_TILE};
+
+      $for N in range(0, BATCH_TILE, 4):
+        $if X86:
+          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale);
+        $else:
+          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_max(wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale), vsat_cutoff);
+
+      $for N in range(0, BATCH_TILE, 4):
+        v128_t vn${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const v128_t vidx${ABC[N:N+4]} = wasm_i32x4_shl(wasm_v128_and(vn${ABC[N:N+4]}, vindex_mask), 2);
+        const v128_t ven${ABC[N:N+4]} = wasm_i32x4_shl(vn${ABC[N:N+4]}, 19);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const uint64_t vidx${ABC[N:N+2]} = wasm_i64x2_extract_lane(vidx${ABC[N:N+4]}, 0);
+        const uint64_t vidx${ABC[N+2:N+4]} = wasm_i64x2_extract_lane(vidx${ABC[N:N+4]}, 1);
+        const float vl${ABC[N]}   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N:N+2]}));
+        const float vl${ABC[N+1]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N:N+2]} >> 32)));
+        const float vl${ABC[N+2]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N+2:N+4]}));
+        const float vl${ABC[N+3]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32)));
+        const v128_t vl${ABC[N:N+4]} = wasm_f32x4_make(vl${ABC[N]}, vl${ABC[N+1]}, vl${ABC[N+2]}, vl${ABC[N+3]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vn${ABC[N:N+4]} = wasm_f32x4_sub(vn${ABC[N:N+4]}, vmagic_bias);
+        v128_t vs${ABC[N:N+4]} = wasm_i32x4_add(vl${ABC[N:N+4]}, ven${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        v128_t vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_hi), vz${ABC[N:N+4]});
+        $if X86:
+          const v128_t vsatm${ABC[N:N+4]} = wasm_f32x4_le(vz${ABC[N:N+4]}, vsat_cutoff);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_lo), vt${ABC[N:N+4]});
+        $if X86:
+          vs${ABC[N:N+4]} = wasm_v128_andnot(vs${ABC[N:N+4]}, vsatm${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        $if X86:
+          vt${ABC[N:N+4]} = wasm_v128_andnot(vt${ABC[N:N+4]}, vsatm${ABC[N:N+4]});
+        v128_t vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt${ABC[N:N+4]}), vc2);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = wasm_f32x4_mul(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
+        vs${ABC[N:N+4]} = wasm_f32x4_sub(vs${ABC[N:N+4]}, vone);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        const v128_t ve${ABC[N:N+4]} = wasm_f32x4_mul(wasm_f32x4_add(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const v128_t vsignm${ABC[N:N+4]} = wasm_i32x4_shr(vx${ABC[N:N+4]}, 31);
+        vx${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vbeta);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const v128_t vy${ABC[N:N+4]} = wasm_v128_bitselect(ve${ABC[N:N+4]}, vx${ABC[N:N+4]}, vsignm${ABC[N:N+4]});
+
+      wasm_v128_store(y, vy${ABC[0:4]});
+      $for N in range(4, BATCH_TILE, 4):
+        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    $if X86:
+      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+    $else:
+      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    $if X86:
+      const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    $if X86:
+      vs = wasm_v128_andnot(vs, vsatm);
+      vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    $if X86:
+      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+    $else:
+      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
+    const v128_t ven = wasm_i32x4_shl(vn, 19);
+
+    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
+    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
+    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
+    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
+    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
+    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
+    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
+
+    v128_t vs = wasm_i32x4_add(vl, ven);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    $if X86:
+      const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    $if X86:
+      vs = wasm_v128_andnot(vs, vsatm);
+      vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/f32-velu/wasmsimd-rr2-p6.c.in b/src/f32-velu/wasmsimd-rr2-p6.c.in
new file mode 100644
index 0000000..c339856
--- /dev/null
+++ b/src/f32-velu/wasmsimd-rr2-p6.c.in
@@ -0,0 +1,198 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 4 == 0
+$assert BATCH_TILE >= 4
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/vunary.h>
+#include <xnnpack/common.h>
+
+
+void xnn_f32_velu_ukernel__wasmsimd_${"x86" if X86 else "arm"}_rr2_p6_x${BATCH_TILE}(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(n != 0);
+  assert(n % sizeof(float) == 0);
+  assert(x != NULL);
+  assert(y != NULL);
+
+  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
+  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
+  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);
+
+  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
+  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
+  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
+  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
+  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
+  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
+  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
+  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
+  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
+  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
+  const v128_t vone = wasm_f32x4_splat(1.0f);
+
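+  // All loops below evaluate ELU(x) = beta * x for x >= 0 and
+  // ELU(x) = alpha * (exp(prescale * x) - 1) for x < 0.
+  // exp(z) is computed as 2**n * exp(t): the integer n ~ z / ln(2) is obtained
+  // with the magic-bias trick and shifted into the exponent field to form the
+  // scale s = 2**n, t = z - n * ln(2) is recovered with a two-term (hi/lo)
+  // Cody-Waite reduction, and exp(t) is approximated by a degree-6 polynomial.
+  // Saturation below vsat_cutoff (where ELU(x) ~ -alpha) is handled by
+  // clamping z on the ARM variant and by masking s and t to zero on the x86
+  // variant.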
+  $if BATCH_TILE > 4:
+    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+      v128_t vx${ABC[0:4]} = wasm_v128_load(x);
+      $for N in range(4, BATCH_TILE, 4):
+        v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
+      x += ${BATCH_TILE};
+
+      $for N in range(0, BATCH_TILE, 4):
+        $if X86:
+          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale);
+        $else:
+          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_max(wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale), vsat_cutoff);
+
+      $for N in range(0, BATCH_TILE, 4):
+        v128_t vn${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);
+
+      $for N in range(0, BATCH_TILE, 4):
+        v128_t vs${ABC[N:N+4]} = wasm_i32x4_shl(vn${ABC[N:N+4]}, 23);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vn${ABC[N:N+4]} = wasm_f32x4_sub(vn${ABC[N:N+4]}, vmagic_bias);
+
+      $for N in range(0, BATCH_TILE, 4):
+        v128_t vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_hi), vz${ABC[N:N+4]});
+        $if X86:
+          const v128_t vsatm${ABC[N:N+4]} = wasm_f32x4_le(vz${ABC[N:N+4]}, vsat_cutoff);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_lo), vt${ABC[N:N+4]});
+        $if X86:
+          vs${ABC[N:N+4]} = wasm_v128_andnot(vs${ABC[N:N+4]}, vsatm${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        $if X86:
+          vt${ABC[N:N+4]} = wasm_v128_andnot(vt${ABC[N:N+4]}, vsatm${ABC[N:N+4]});
+        v128_t vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt${ABC[N:N+4]}), vc5);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc4);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc3);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc2);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        vt${ABC[N:N+4]} = wasm_f32x4_mul(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
+        vs${ABC[N:N+4]} = wasm_f32x4_sub(vs${ABC[N:N+4]}, vone);
+
+      $for N in range(0, BATCH_TILE, 4):
+        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vt${ABC[N:N+4]});
+
+      $for N in range(0, BATCH_TILE, 4):
+        const v128_t ve${ABC[N:N+4]} = wasm_f32x4_mul(wasm_f32x4_add(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const v128_t vsignm${ABC[N:N+4]} = wasm_i32x4_shr(vx${ABC[N:N+4]}, 31);
+        vx${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vbeta);
+
+      $for N in range(0, BATCH_TILE, 4):
+        const v128_t vy${ABC[N:N+4]} = wasm_v128_bitselect(ve${ABC[N:N+4]}, vx${ABC[N:N+4]}, vsignm${ABC[N:N+4]});
+
+      wasm_v128_store(y, vy${ABC[0:4]});
+      $for N in range(4, BATCH_TILE, 4):
+        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
+      y += ${BATCH_TILE};
+    }
+  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
+    v128_t vx = wasm_v128_load(x);
+    x += 4;
+
+    $if X86:
+      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+    $else:
+      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    $if X86:
+      const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    $if X86:
+      vs = wasm_v128_andnot(vs, vsatm);
+      vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    wasm_v128_store(y, vy);
+    y += 4;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    v128_t vx = wasm_v128_load(x);
+
+    $if X86:
+      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
+    $else:
+      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
+
+    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
+    v128_t vs = wasm_i32x4_shl(vn, 23);
+    vn = wasm_f32x4_sub(vn, vmagic_bias);
+
+    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
+    $if X86:
+      const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
+    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
+    $if X86:
+      vs = wasm_v128_andnot(vs, vsatm);
+      vt = wasm_v128_andnot(vt, vsatm);
+
+    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
+    vp = wasm_f32x4_mul(vp, vt);
+
+    vt = wasm_f32x4_mul(vt, vs);
+    vs = wasm_f32x4_sub(vs, vone);
+    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
+    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
+
+    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
+    vx = wasm_f32x4_mul(vx, vbeta);
+    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
+
+    if (n & (2 * sizeof(float))) {
+      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
+      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
+      y += 2;
+    }
+    if (n & (1 * sizeof(float))) {
+      *y = wasm_f32x4_extract_lane(vy, 0);
+    }
+  }
+}
diff --git a/src/init.c b/src/init.c
index e4757d4..e741e5f 100644
--- a/src/init.c
+++ b/src/init.c
@@ -342,6 +342,11 @@
       };
       xnn_params.f32.abs = (xnn_univector_ukernel_function) xnn_f32_vabs_ukernel__neon_x8;
       xnn_params.f32.clamp = (xnn_univector_ukernel_function) xnn_f32_clamp_ukernel__neon_x8;
+      if (cpuinfo_has_arm_neon_fma()) {
+        xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__neonfma_rr1_p6_x8;
+      } else {
+        xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8;
+      }
       xnn_params.f32.hswish = (xnn_univector_ukernel_function) xnn_f32_hswish_ukernel__neon_x16;
       xnn_params.f32.lrelu = (xnn_univector_ukernel_function) xnn_f32_vlrelu_ukernel__neon_x8;
       xnn_params.f32.neg = (xnn_univector_ukernel_function) xnn_f32_vneg_ukernel__neon_x8;
@@ -414,6 +419,44 @@
         .row_tile = 2,
       };
       #ifndef XNN_NO_NCHW_OPERATORS
+        init_flags |= XNN_INIT_FLAG_CHW_OPT;
+
+        xnn_params.f32.spmm = (struct spmm_parameters) {
+          .ukernel = (xnn_spmm_ukernel_function) xnn_f32_spmm_minmax_ukernel_32x1__neon,
+          .mr = 32,
+          .nr = 1,
+        };
+        xnn_params.f32.conv_hwc2chw_3x3c3s2 = (struct conv_hwc2chw_parameters) {
+          .ukernel_with_symm_padding =
+            (xnn_conv_hwc2chw_ukernel_function) xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2,
+          .output_channel_tile = 4,
+          .output_height_tile = 2,
+          .output_width_tile = 2,
+        };
+        xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4,
+          .output_width_tile = 4,
+          .output_height_tile = 2,
+        };
+        xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4,
+          .output_width_tile = 4,
+          .output_height_tile = 1,
+        };
+        xnn_params.f32.dwconv2d_chw_5x5 = (struct dwconv2d_chw_parameters) {
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4,
+          .output_width_tile = 4,
+          .output_height_tile = 1,
+        };
+        xnn_params.f32.dwconv2d_chw_5x5s2 = (struct dwconv2d_chw_parameters) {
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4,
+          .output_width_tile = 4,
+          .output_height_tile = 1,
+        };
+        xnn_params.f32.gavgpool_cw = (struct gavgpool_cw_parameters) {
+          .ukernel = (xnn_gavgpool_cw_ukernel_function) xnn_f32_gavgpool_cw_ukernel__neon_x4,
+          .channel_tile = 4,
+        };
         xnn_params.f32.ibilinear_chw = (struct ibilinear_chw_parameters) {
           .ukernel = (xnn_ibilinear_chw_ukernel_function) xnn_f32_ibilinear_chw_ukernel__scalar_p4,
           .channel_tile = 1,
@@ -587,6 +630,7 @@
       };
       xnn_params.f32.abs = (xnn_univector_ukernel_function) xnn_f32_vabs_ukernel__scalar_x4;
       xnn_params.f32.clamp = (xnn_univector_ukernel_function) xnn_f32_clamp_ukernel__scalar_x4;
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4;
       xnn_params.f32.hswish = (xnn_univector_ukernel_function) xnn_f32_hswish_ukernel__scalar_x4;
       xnn_params.f32.lrelu = (xnn_univector_ukernel_function) xnn_f32_vlrelu_ukernel__scalar_x4;
       xnn_params.f32.neg = (xnn_univector_ukernel_function) xnn_f32_vneg_ukernel__scalar_x4;
@@ -652,6 +696,8 @@
         .row_tile = 2,
       };
       #ifndef XNN_NO_NCHW_OPERATORS
+        init_flags |= XNN_INIT_FLAG_CHW_OPT;
+
         xnn_params.f32.spmm = (struct spmm_parameters) {
           .ukernel = (xnn_spmm_ukernel_function) xnn_f32_spmm_minmax_ukernel_8x1__scalar,
           .mr = 8,
@@ -675,28 +721,24 @@
           .output_width_tile = 1,
         };
         xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_2x1_acc2,
-          .input_width_tile = 1,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_4x1,
+          .output_width_tile = 1,
+          .output_height_tile = 4,
+        };
+        xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_2x1_acc2,
           .output_width_tile = 1,
           .output_height_tile = 2,
         };
-        xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1_acc3,
-          .input_width_tile = 1,
-          .output_width_tile = 1,
-          .output_height_tile = 1,
-        };
         xnn_params.f32.dwconv2d_chw_5x5 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4,
-          .input_width_tile = 1,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_2x1_acc2,
           .output_width_tile = 1,
-          .output_height_tile = 1,
+          .output_height_tile = 2,
         };
         xnn_params.f32.dwconv2d_chw_5x5s2 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4,
-          .input_width_tile = 1,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_2x1_acc2,
           .output_width_tile = 1,
-          .output_height_tile = 1,
+          .output_height_tile = 2,
         };
         xnn_params.f32.gavgpool_cw = (struct gavgpool_cw_parameters) {
           .ukernel = (xnn_gavgpool_cw_ukernel_function) xnn_f32_gavgpool_cw_ukernel__scalar_x1,
@@ -752,7 +794,7 @@
   #ifndef XNN_NO_QS8_OPERATORS
     init_flags |= XNN_INIT_FLAG_QS8;
 
-    #if XNN_PLATFORM_IOS
+    #if XNN_PLATFORM_IOS || XNN_PLATFORM_MAC
       #if XNN_ENABLE_ASSEMBLY
         if (cpuinfo_has_arm_neon_dot()) {
           xnn_params.qs8.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
@@ -788,7 +830,7 @@
           xnn_params.qs8.gemm.nr = 8;
         }
       #endif  // XNN_ENABLE_ASSEMBLY
-    #else  // !XNN_PLATFORM_IOS
+    #else  // !XNN_PLATFORM_IOS && !XNN_PLATFORM_MAC
       #if XNN_ENABLE_ASSEMBLY
         if (cpuinfo_has_arm_neon_dot()) {
           switch (cpuinfo_get_core(0)->uarch) {
@@ -859,7 +901,7 @@
           xnn_params.qs8.gemm.nr = 8;
         }
       #endif  // XNN_ENABLE_ASSEMBLY
-    #endif  // XNN_PLATFORM_IOS
+    #endif  // XNN_PLATFORM_IOS || XNN_PLATFORM_MAC
 
     xnn_params.qs8.dwconv[0].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_qs8_dwconv_minmax_ukernel_up8x9__neon_mul16;
     xnn_params.qs8.dwconv[0].channel_tile = 8;
@@ -992,7 +1034,7 @@
   #ifndef XNN_NO_F32_OPERATORS
     init_flags |= XNN_INIT_FLAG_F32;
 
-    #if XNN_PLATFORM_IOS
+    #if XNN_PLATFORM_IOS || XNN_PLATFORM_MAC
       #if XNN_ENABLE_ASSEMBLY
         xnn_params.f32.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_6x8__aarch64_neonfma_cortex_a75);
         xnn_params.f32.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_6x8__aarch64_neonfma_cortex_a75);
@@ -1008,7 +1050,7 @@
         xnn_params.f32.gemm.mr = 6;
         xnn_params.f32.gemm.nr = 8;
        #endif  // XNN_ENABLE_ASSEMBLY
-    #else  // !XNN_PLATFORM_IOS
+    #else  // !XNN_PLATFORM_IOS && !XNN_PLATFORM_MAC
       #if XNN_ENABLE_ASSEMBLY
         switch (cpuinfo_get_core(0)->uarch) {
           case cpuinfo_uarch_cortex_a57:
@@ -1140,7 +1182,7 @@
         xnn_params.f32.gemm.mr = 6;
         xnn_params.f32.gemm.nr = 8;
       #endif  // XNN_ENABLE_ASSEMBLY
-    #endif  // XNN_PLATFORM_IOS
+    #endif  // XNN_PLATFORM_IOS || XNN_PLATFORM_MAC
     xnn_params.f32.gemm2.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_4x2__neonfma_lane_ld64);
     xnn_params.f32.gemm2.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_4x2__neonfma_lane_ld64);
     xnn_params.f32.gemm2.mr = 4;
@@ -1150,11 +1192,11 @@
     xnn_params.f32.dwconv[0].channel_tile = 8;
     xnn_params.f32.dwconv[0].primary_tile = 4;
 
-    #if XNN_PLATFORM_IOS
+    #if XNN_PLATFORM_IOS || XNN_PLATFORM_MAC
       xnn_params.f32.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_f32_dwconv_minmax_ukernel_up8x9__neonfma;
       xnn_params.f32.dwconv[1].channel_tile = 8;
       xnn_params.f32.dwconv[1].primary_tile = 9;
-    #else  // !XNN_PLATFORM_IOS
+    #else  // !XNN_PLATFORM_IOS && !XNN_PLATFORM_MAC
       switch (cpuinfo_get_core(0)->uarch) {
         case cpuinfo_uarch_kryo:
           xnn_params.f32.dwconv[1].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_f32_dwconv_minmax_ukernel_up4x9__neonfma;
@@ -1176,7 +1218,7 @@
           xnn_params.f32.dwconv[1].primary_tile = 9;
           break;
       }
-    #endif  // XNN_PLATFORM_IOS
+    #endif  // XNN_PLATFORM_IOS || XNN_PLATFORM_MAC
 
     xnn_params.f32.dwconv[2].minmax.unipass = (xnn_dwconv_unipass_ukernel_function) xnn_f32_dwconv_minmax_ukernel_up4x25__neonfma_acc2;
     xnn_params.f32.dwconv[2].channel_tile = 4;
@@ -1224,6 +1266,7 @@
     };
     xnn_params.f32.abs = (xnn_univector_ukernel_function) xnn_f32_vabs_ukernel__neon_x8;
     xnn_params.f32.clamp = (xnn_univector_ukernel_function) xnn_f32_clamp_ukernel__neon_x8;
+    xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16;
     xnn_params.f32.hswish = (xnn_univector_ukernel_function) xnn_f32_hswish_ukernel__neon_x16;
     xnn_params.f32.lrelu = (xnn_univector_ukernel_function) xnn_f32_vlrelu_ukernel__neon_x8;
     xnn_params.f32.neg = (xnn_univector_ukernel_function) xnn_f32_vneg_ukernel__neon_x8;
@@ -1289,6 +1332,8 @@
       .row_tile = 2,
     };
     #ifndef XNN_NO_NCHW_OPERATORS
+      init_flags |= XNN_INIT_FLAG_CHW_OPT;
+
       xnn_params.f32.spmm = (struct spmm_parameters) {
         .ukernel = (xnn_spmm_ukernel_function) xnn_f32_spmm_minmax_ukernel_16x1__neonfma_pipelined,
         .mr = 16,
@@ -1313,25 +1358,21 @@
       };
       xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4,
-        .input_width_tile = 4,
         .output_width_tile = 4,
         .output_height_tile = 3,
       };
       xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_2x4_acc2,
-        .input_width_tile = 4,
         .output_width_tile = 4,
         .output_height_tile = 2,
       };
       xnn_params.f32.dwconv2d_chw_5x5 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_4x4,
-        .input_width_tile = 4,
         .output_width_tile = 4,
         .output_height_tile = 4,
       };
       xnn_params.f32.dwconv2d_chw_5x5s2 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neonfma_1x4_acc2,
-        .input_width_tile = 4,
         .output_width_tile = 4,
         .output_height_tile = 1,
       };
@@ -1717,6 +1758,15 @@
       xnn_params.f32.clamp = (xnn_univector_ukernel_function) xnn_f32_clamp_ukernel__sse_x8;
     }
     if (!XNN_PLATFORM_MOBILE && cpuinfo_has_x86_avx512f()) {
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64;
+    } else if (!XNN_PLATFORM_MOBILE && cpuinfo_has_x86_avx2()) {
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56;
+    } else if (!XNN_PLATFORM_MOBILE && cpuinfo_has_x86_avx()) {
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32;
+    } else {
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12;
+    }
+    if (!XNN_PLATFORM_MOBILE && cpuinfo_has_x86_avx512f()) {
       xnn_params.f32.hswish = (xnn_univector_ukernel_function) xnn_f32_hswish_ukernel__avx512f_x16;
     } else if (!XNN_PLATFORM_MOBILE && cpuinfo_has_x86_fma3()) {
       xnn_params.f32.hswish = (xnn_univector_ukernel_function) xnn_f32_hswish_ukernel__fma3_x16;
@@ -1948,6 +1998,12 @@
       .row_tile = 2,
     };
     #ifndef XNN_NO_NCHW_OPERATORS
+      // Sparse microkernels on x86 currently target only SSE, and on processors
+      // with the AVX ISA, dense inference is expected to be faster than sparse.
+      if (!cpuinfo_has_x86_avx()) {
+        init_flags |= XNN_INIT_FLAG_CHW_OPT;
+      }
+
       xnn_params.f32.spmm = (struct spmm_parameters) {
         .ukernel = (xnn_spmm_ukernel_function) xnn_f32_spmm_minmax_ukernel_32x1__sse,
         .mr = 32,
@@ -1963,33 +2019,28 @@
       if (!XNN_PLATFORM_MOBILE && cpuinfo_has_x86_ssse3()) {
         xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
           .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2,
-          .input_width_tile = 4,
           .output_width_tile = 4,
           .output_height_tile = 2,
         };
       } else {
         xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
           .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2,
-          .input_width_tile = 4,
           .output_width_tile = 4,
           .output_height_tile = 2,
         };
       }
       xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3,
-        .input_width_tile = 4,
         .output_width_tile = 4,
         .output_height_tile = 1,
       };
       xnn_params.f32.dwconv2d_chw_5x5 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_4x4,
-        .input_width_tile = 4,
         .output_width_tile = 4,
         .output_height_tile = 4,
       };
       xnn_params.f32.dwconv2d_chw_5x5s2 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__sse_2x4,
-        .input_width_tile = 4,
         .output_width_tile = 4,
         .output_height_tile = 2,
       };
@@ -2131,10 +2182,10 @@
     init_flags |= XNN_INIT_FLAG_F32;
 
     if (is_wasm_x86) {
-      xnn_params.f32.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
-      xnn_params.f32.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
-      xnn_params.f32.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
-      xnn_params.f32.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      xnn_params.f32.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
+      xnn_params.f32.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
+      xnn_params.f32.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
+      xnn_params.f32.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       xnn_params.f32.gemm.relu.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat);
       xnn_params.f32.gemm.relu.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat);
       xnn_params.f32.gemm.relu.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat);
@@ -2154,10 +2205,10 @@
       xnn_params.f32.gemm2.nr = 2;
       xnn_params.f32.gemm2.log2_kr = 2;
     } else {
-      xnn_params.f32.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
-      xnn_params.f32.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
-      xnn_params.f32.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
-      xnn_params.f32.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      xnn_params.f32.gemm.minmax.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
+      xnn_params.f32.gemm.minmax.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
+      xnn_params.f32.gemm.minmax.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
+      xnn_params.f32.gemm.minmax.igemm1 = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       xnn_params.f32.gemm.relu.gemm = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat);
       xnn_params.f32.gemm.relu.igemm = xnn_init_hmp_igemm_ukernel((xnn_igemm_ukernel_function) xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat);
       xnn_params.f32.gemm.relu.gemm1 = xnn_init_hmp_gemm_ukernel((xnn_gemm_ukernel_function) xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat);
@@ -2279,6 +2330,11 @@
     } else {
       xnn_params.f32.clamp = (xnn_univector_ukernel_function) xnn_f32_clamp_ukernel__wasmsimd_arm_x8;
     }
+    if (is_wasm_x86) {
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20;
+    } else {
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20;
+    }
     xnn_params.f32.hswish = (xnn_univector_ukernel_function) xnn_f32_hswish_ukernel__wasmsimd_x16;
     if (is_wasm_x86) {
       xnn_params.f32.lrelu = (xnn_univector_ukernel_function) xnn_f32_vlrelu_ukernel__wasmsimd_minmax_x8;
@@ -2433,6 +2489,8 @@
       };
     }
     #ifndef XNN_NO_NCHW_OPERATORS
+      init_flags |= XNN_INIT_FLAG_CHW_OPT;
+
       if (is_wasm_x86) {
         xnn_params.f32.spmm = (struct spmm_parameters) {
           .ukernel = (xnn_spmm_ukernel_function) xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86,
@@ -2455,51 +2513,43 @@
       };
       if (is_wasm_x86) {
         xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4,
-          .input_width_tile = 4,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4,
           .output_width_tile = 4,
           .output_height_tile = 2,
         };
         xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3,
-          .input_width_tile = 4,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2,
           .output_width_tile = 4,
           .output_height_tile = 1,
         };
         xnn_params.f32.dwconv2d_chw_5x5 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4,
-          .input_width_tile = 4,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4,
           .output_width_tile = 4,
           .output_height_tile = 3,
         };
         xnn_params.f32.dwconv2d_chw_5x5s2 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2,
-          .input_width_tile = 4,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2,
           .output_width_tile = 4,
           .output_height_tile = 1,
         };
       } else {
         xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4,
-          .input_width_tile = 4,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4,
           .output_width_tile = 4,
           .output_height_tile = 2,
         };
         xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3,
-          .input_width_tile = 4,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4,
           .output_width_tile = 4,
           .output_height_tile = 1,
         };
         xnn_params.f32.dwconv2d_chw_5x5 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4,
-          .input_width_tile = 4,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4,
           .output_width_tile = 4,
           .output_height_tile = 3,
         };
         xnn_params.f32.dwconv2d_chw_5x5s2 = (struct dwconv2d_chw_parameters) {
-          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2,
-          .input_width_tile = 4,
+          .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2,
           .output_width_tile = 4,
           .output_height_tile = 1,
         };
@@ -2718,6 +2768,11 @@
     } else {
       xnn_params.f32.hswish = (xnn_univector_ukernel_function) xnn_f32_hswish_ukernel__wasm_x4;
     }
+    if (is_wasm_x86) {
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2;
+    } else {
+      xnn_params.f32.elu = (xnn_univector_ukernel_function) xnn_f32_velu_ukernel__wasm_rr2_p6_x6;
+    }
     xnn_params.f32.lrelu = (xnn_univector_ukernel_function) xnn_f32_vlrelu_ukernel__scalar_x4;
     xnn_params.f32.neg = (xnn_univector_ukernel_function) xnn_f32_vneg_ukernel__scalar_x4;
     if (is_wasm_x86) {
@@ -2795,6 +2850,8 @@
       .row_tile = 2,
     };
     #ifndef XNN_NO_NCHW_OPERATORS
+      init_flags |= XNN_INIT_FLAG_CHW_OPT;
+
       xnn_params.f32.spmm = (struct spmm_parameters) {
         .ukernel = (xnn_spmm_ukernel_function) xnn_f32_spmm_minmax_ukernel_8x1__scalar,
         .mr = 8,
@@ -2819,25 +2876,21 @@
       };
       xnn_params.f32.dwconv2d_chw_3x3 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_2x1_acc2,
-        .input_width_tile = 1,
         .output_width_tile = 1,
         .output_height_tile = 2,
       };
       xnn_params.f32.dwconv2d_chw_3x3s2 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1_acc2,
-        .input_width_tile = 1,
         .output_width_tile = 1,
         .output_height_tile = 1,
       };
       xnn_params.f32.dwconv2d_chw_5x5 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc5,
-        .input_width_tile = 1,
         .output_width_tile = 1,
         .output_height_tile = 1,
       };
       xnn_params.f32.dwconv2d_chw_5x5s2 = (struct dwconv2d_chw_parameters) {
         .ukernel = (xnn_dwconv2d_chw_ukernel_function) xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc5,
-        .input_width_tile = 1,
         .output_width_tile = 1,
         .output_height_tile = 1,
       };
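
The init changes above follow one pattern: probe CPU features from the widest ISA down and bind each operation's function pointer to the best available microkernel (here for the newly wired-up ELU), with CHW support advertised through init_flags. A minimal standalone sketch of that dispatch pattern, with hypothetical cpu_has_*() probes standing in for the cpuinfo_has_x86_*() queries and stub kernels standing in for the xnn_f32_velu_ukernel__* variants:

#include <stddef.h>

typedef void (*f32_elu_ukernel_fn)(size_t batch, const float* input, float* output);

/* Stub kernels; the real ones are the xnn_f32_velu_ukernel__* functions above. */
static void elu_avx512(size_t n, const float* x, float* y) { (void) n; (void) x; (void) y; }
static void elu_avx2(size_t n, const float* x, float* y)   { (void) n; (void) x; (void) y; }
static void elu_sse2(size_t n, const float* x, float* y)   { (void) n; (void) x; (void) y; }

/* Hypothetical feature probes standing in for cpuinfo_has_x86_avx512f()/avx2(). */
static int cpu_has_avx512f(void) { return 0; }
static int cpu_has_avx2(void)    { return 1; }

f32_elu_ukernel_fn select_f32_elu(void) {
  if (cpu_has_avx512f()) {
    return elu_avx512;      /* widest ISA wins */
  } else if (cpu_has_avx2()) {
    return elu_avx2;
  }
  return elu_sse2;          /* SSE2 baseline is always available on x86-64 */
}
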
diff --git a/src/math/expm1minus-avx2-rr1-lut4-p4-perm.c b/src/math/expm1minus-avx2-rr1-lut4-p4-perm.c
index b5887c9..444c26d 100644
--- a/src/math/expm1minus-avx2-rr1-lut4-p4-perm.c
+++ b/src/math/expm1minus-avx2-rr1-lut4-p4-perm.c
@@ -80,7 +80,7 @@
 
     // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/8, log(2)/8].
     //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
-    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
     vp = _mm256_fmadd_ps(vp, vt, vc2);
     vp = _mm256_mul_ps(vp, vt);
 
diff --git a/src/math/expm1minus-avx2-rr1-lut8-p4-perm.c b/src/math/expm1minus-avx2-rr1-lut8-p4-perm.c
index b3a5dcb..ab38304 100644
--- a/src/math/expm1minus-avx2-rr1-lut8-p4-perm.c
+++ b/src/math/expm1minus-avx2-rr1-lut8-p4-perm.c
@@ -79,7 +79,7 @@
 
     // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/16, log(2)/16].
     //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
-    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
+    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
     vp = _mm256_fmadd_ps(vp, vt, vc2);
     vp = _mm256_mul_ps(vp, vt);
 
diff --git a/src/math/expm1minus-avx512f-rr1-lut16-p3-perm.c b/src/math/expm1minus-avx512f-rr1-lut16-p3-perm.c
index d6520b5..fe0714a 100644
--- a/src/math/expm1minus-avx512f-rr1-lut16-p3-perm.c
+++ b/src/math/expm1minus-avx512f-rr1-lut16-p3-perm.c
@@ -67,7 +67,7 @@
     const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
 
     // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
-    const __m512i vl = _mm512_permutevar_epi32(_mm512_castps_si512(vn), vtable);
+    const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
 
     // Adjust exponent of the value l fetched from the table to get the final s value.
     const __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
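
The _mm256_fmadd_ps substitutions in the two AVX2 kernels above make the first polynomial step match the fused multiply-adds already used on the following lines; a fused a*b + c is evaluated with a single rounding, so it is at least as accurate as the separate multiply and add it replaces. A small standalone illustration of that single-rounding difference (build with, e.g., -mavx2 -mfma):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  // 1/3 rounds to 11184811 * 2^-25 in float, so the exact product with 3.0f
  // is 1 + 2^-25: the fused form keeps the 2^-25, the split form rounds it away.
  const __m256 a = _mm256_set1_ps(1.0f / 3.0f);
  const __m256 b = _mm256_set1_ps(3.0f);
  const __m256 c = _mm256_set1_ps(-1.0f);
  const __m256 fused = _mm256_fmadd_ps(a, b, c);               // one rounding
  const __m256 split = _mm256_add_ps(_mm256_mul_ps(a, b), c);  // two roundings
  float f[8], s[8];
  _mm256_storeu_ps(f, fused);
  _mm256_storeu_ps(s, split);
  printf("fused: %g  split: %g\n", f[0], s[0]);  // fused: ~2.98023e-08  split: 0
  return 0;
}
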
diff --git a/src/math/expm1minus-scalar-rr2-lut16-p3.c b/src/math/expm1minus-scalar-rr2-lut16-p3.c
index b7d65c9..3ddb15e 100644
--- a/src/math/expm1minus-scalar-rr2-lut16-p3.c
+++ b/src/math/expm1minus-scalar-rr2-lut16-p3.c
@@ -59,27 +59,28 @@
     //    lower than -25.
     //
     // Shift bits 4:12 into 23:31 (position of floating-point exponent).
-    const uint32_t ve = fp32_to_bits(vn) << 19;
+    const uint32_t ven = fp32_to_bits(vn) << 19;
 
     // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
     const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
     // Adjust exponent of the value l fetched from the table to get the final s value.
-    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ve);
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
 
     // Subtract the large number back to get final n := round(x / log(2), 4).
     vn -= vmagic_bias;
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
-      vs = 0.0f;
-    }
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     float vt = vn * vminus_ln2_hi + vx;
     vt = vn * vminus_ln2_lo + vt;
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
     // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
     //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
     float vp = vc3 * vt + vc2;
diff --git a/src/math/expm1minus-scalar-rr2-lut16-p4.c b/src/math/expm1minus-scalar-rr2-lut16-p4.c
index 11a9834..8a085bb 100644
--- a/src/math/expm1minus-scalar-rr2-lut16-p4.c
+++ b/src/math/expm1minus-scalar-rr2-lut16-p4.c
@@ -60,27 +60,28 @@
     //    lower than -25.
     //
     // Shift bits 4:12 into 23:31 (position of floating-point exponent).
-    const uint32_t ve = fp32_to_bits(vn) << 19;
+    const uint32_t ven = fp32_to_bits(vn) << 19;
 
     // Use bits 0:4 of n, as integer, as an index for table lookup of l := 2**frac(n).
     const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
     // Adjust exponent of the value l fetched from the table to get the final s value.
-    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ve);
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);
 
     // Subtract the large number back to get final n := round(x / log(2), 4).
     vn -= vmagic_bias;
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
-      vs = 0.0f;
-    }
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     float vt = vn * vminus_ln2_hi + vx;
     vt = vn * vminus_ln2_lo + vt;
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
     // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
     //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
     float vp = vc4 * vt + vc3;
diff --git a/src/math/expm1minus-scalar-rr2-lut4-p4.c b/src/math/expm1minus-scalar-rr2-lut4-p4.c
index 627f990..a9d46b1 100644
--- a/src/math/expm1minus-scalar-rr2-lut4-p4.c
+++ b/src/math/expm1minus-scalar-rr2-lut4-p4.c
@@ -60,27 +60,28 @@
     //    lower than -25.
     //
     // Shift bits 2:10 into 23:31 (position of floating-point exponent).
-    const uint32_t ve = fp32_to_bits(vn) << 21;
+    const uint32_t ven = fp32_to_bits(vn) << 21;
 
     // Use bits 0:2 of n, as integer, as an index for table lookup of l := 2**frac(n).
     const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
     // Adjust exponent of the value l fetched from the table to get the final s value.
-    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_4[vidx] + ve);
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_4[vidx] + ven);
 
     // Subtract the large number back to get final n := round(x / log(2), 2).
     vn -= vmagic_bias;
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
-      vs = 0.0f;
-    }
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     float vt = vn * vminus_ln2_hi + vx;
     vt = vn * vminus_ln2_lo + vt;
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
     // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/8, log(2)/8].
     //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
     float vp = vc4 * vt + vc3;
diff --git a/src/math/expm1minus-scalar-rr2-lut8-p3.c b/src/math/expm1minus-scalar-rr2-lut8-p3.c
index 992b529..c926324 100644
--- a/src/math/expm1minus-scalar-rr2-lut8-p3.c
+++ b/src/math/expm1minus-scalar-rr2-lut8-p3.c
@@ -59,27 +59,28 @@
     //    lower than -25.
     //
     // Shift bits 3:11 into 23:31 (position of floating-point exponent).
-    const uint32_t ve = fp32_to_bits(vn) << 20;
+    const uint32_t ven = fp32_to_bits(vn) << 20;
 
     // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
     const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
     // Adjust exponent of the value l fetched from the table to get the final s value.
-    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_8[vidx] + ve);
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_8[vidx] + ven);
 
     // Subtract the large number back to get final n := round(x / log(2), 3).
     vn -= vmagic_bias;
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
-      vs = 0.0f;
-    }
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     float vt = vn * vminus_ln2_hi + vx;
     vt = vn * vminus_ln2_lo + vt;
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
     // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/16, log(2)/16].
     //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
     float vp = vc3 * vt + vc2;
diff --git a/src/math/expm1minus-scalar-rr2-lut8-p4.c b/src/math/expm1minus-scalar-rr2-lut8-p4.c
index abe11ac..5d7899b 100644
--- a/src/math/expm1minus-scalar-rr2-lut8-p4.c
+++ b/src/math/expm1minus-scalar-rr2-lut8-p4.c
@@ -60,27 +60,28 @@
     //    lower than -25.
     //
     // Shift bits 3:11 into 23:31 (position of floating-point exponent).
-    const uint32_t ve = fp32_to_bits(vn) << 20;
+    const uint32_t ven = fp32_to_bits(vn) << 20;
 
     // Use bits 0:3 of n, as integer, as an index for table lookup of l := 2**frac(n).
     const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
     // Adjust exponent of the value l fetched from the table to get the final s value.
-    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_8[vidx] + ve);
+    float vs = fp32_from_bits(xnn_table_exp2minus_k_over_8[vidx] + ven);
 
     // Subtract the large number back to get final n := round(x / log(2), 3).
     vn -= vmagic_bias;
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
-      vs = 0.0f;
-    }
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     float vt = vn * vminus_ln2_hi + vx;
     vt = vn * vminus_ln2_lo + vt;
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
     // Compute degree-4 polynomial approximation for exp(t) - 1 on [-log(2)/16, log(2)/16].
     //   P(t) = t * (1 + t * (c2 + t * (c3 + t * c4))) = t + t * (t * (c2 + t * (c3 + t * c4))) = t + t * p
     float vp = vc4 * vt + vc3;
diff --git a/src/math/expm1minus-scalar-rr2-p5.c b/src/math/expm1minus-scalar-rr2-p5.c
index 9d4af3a..1e7c534 100644
--- a/src/math/expm1minus-scalar-rr2-p5.c
+++ b/src/math/expm1minus-scalar-rr2-p5.c
@@ -54,17 +54,18 @@
     // Subtract the large number back to get final n := round(x / log(2)).
     vn -= vmagic_bias;
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
-      vs = 0.0f;
-    }
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     float vt = vn * vminus_ln2_hi + vx;
     vt = vn * vminus_ln2_lo + vt;
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
     // Compute degree-5 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
     //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
     //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * c5)))) = t + t * p
diff --git a/src/math/expm1minus-scalar-rr2-p6.c b/src/math/expm1minus-scalar-rr2-p6.c
index 9b893ff..359d943 100644
--- a/src/math/expm1minus-scalar-rr2-p6.c
+++ b/src/math/expm1minus-scalar-rr2-p6.c
@@ -55,17 +55,18 @@
     // Subtract the large number back to get final n := round(x / log(2)).
     vn -= vmagic_bias;
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
-      vs = 0.0f;
-    }
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     float vt = vn * vminus_ln2_hi + vx;
     vt = vn * vminus_ln2_lo + vt;
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    if XNN_UNPREDICTABLE(vx <= vsat_cutoff) {
+      vs = 0.0f;
+      vt = 0.0f;
+    }
+
     // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
     //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
     //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
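
A scalar sketch of the saturation change repeated in the kernels above. Assuming the usual reconstruction used by these kernels, expm1f(x) ~= s * (t + t * p) + (s - 1), zeroing both s (scale) and t (reduced argument) collapses the expression to exactly -1.0f no matter what the polynomial produced for an out-of-range input; if only s were zeroed and t happened to be non-finite, 0 * inf would yield NaN instead. The helper below is illustrative, not XNNPACK code:

#include <math.h>
#include <stdio.h>

static float reconstruct(float s, float t, float p) {
  return s * (t + t * p) + (s - 1.0f);
}

int main(void) {
  const float p = 42.0f;  // stand-in polynomial value for a saturated input
  printf("s and t zeroed: %f\n", reconstruct(0.0f, 0.0f, p));             // -1.000000
  printf("only s zeroed, t non-finite: %f\n", reconstruct(0.0f, INFINITY, p));  // nan
  return 0;
}
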
diff --git a/src/math/expm1minus-wasmsimd-rr2-lut16-p3-andnot.c b/src/math/expm1minus-wasmsimd-rr2-lut16-p3-andnot.c
index d06f426..ffa93e9 100644
--- a/src/math/expm1minus-wasmsimd-rr2-lut16-p3-andnot.c
+++ b/src/math/expm1minus-wasmsimd-rr2-lut16-p3-andnot.c
@@ -76,15 +76,17 @@
     // Subtract the large number back to get final n := round(x / log(2), 4).
     vn = wasm_f32x4_sub(vn, vmagic_bias);
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    vs = wasm_v128_andnot(vs, wasm_f32x4_le(vx, vsat_cutoff));
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
     vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    const v128_t vm = wasm_f32x4_le(vx, vsat_cutoff);
+    vs = wasm_v128_andnot(vs, vm);
+    vt = wasm_v128_andnot(vt, vm);
+
     // Compute degree-3 polynomial approximation for exp(t) - 1 on [-log(2)/32, log(2)/32].
     //   P(t) = t * (1 + t * (c2 + t * c3)) = t + t * (t * (c2 + t * c3)) = t + t * p
     v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
diff --git a/src/math/expm1minus-wasmsimd-rr2-lut16-p3-max.c b/src/math/expm1minus-wasmsimd-rr2-lut16-p3-max.c
index 4ac3cec..1f26b98 100644
--- a/src/math/expm1minus-wasmsimd-rr2-lut16-p3-max.c
+++ b/src/math/expm1minus-wasmsimd-rr2-lut16-p3-max.c
@@ -45,7 +45,7 @@
     // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
     // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
     // expm1f(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
-    vx = wasm_f32x4_max(vsat_cutoff, vx);
+    vx = wasm_f32x4_max(vx, vsat_cutoff);
 
     // Compute reduced argument n := round(x / log(2), 4).
     // We do it by adding a large number (magic bias), which causes rounding of the result to 4 fractional bits, then
diff --git a/src/math/expm1minus-wasmsimd-rr2-p6-andnot.c b/src/math/expm1minus-wasmsimd-rr2-p6-andnot.c
index fe34b01..1e5d1f8 100644
--- a/src/math/expm1minus-wasmsimd-rr2-p6-andnot.c
+++ b/src/math/expm1minus-wasmsimd-rr2-p6-andnot.c
@@ -57,15 +57,17 @@
     // Subtract the large number back to get final n := round(x / log(2)).
     vn = wasm_f32x4_sub(vn, vmagic_bias);
 
-    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
-    // To guarantee this behaviour, we zero out s (scale) for x <= sat_cutoff.
-    vs = wasm_v128_andnot(vs, wasm_f32x4_le(vx, vsat_cutoff));
-
     // Compute reduced argument t := x - n * log(2).
     // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
     v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
     vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
 
+    // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
+    // To guarantee this behaviour, we zero out s (scale) and t (reduced argument) for x <= sat_cutoff.
+    const v128_t vm = wasm_f32x4_le(vx, vsat_cutoff);
+    vs = wasm_v128_andnot(vs, vm);
+    vt = wasm_v128_andnot(vt, vm);
+
     // Compute degree-6 polynomial approximation for exp(t) - 1 on [-log(2)/2, log(2)/2].
     //   P(t) = t * (1 + t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))))
     //        = t + t * (t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))))) = t + t * p
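
The Wasm SIMD kernels express the same saturation fix with lane masks instead of a scalar branch: wasm_f32x4_le() yields an all-ones lane wherever x <= sat_cutoff, and wasm_v128_andnot(v, m) computes v & ~m, clearing exactly those lanes in both s and t. A minimal sketch of that idiom (build with a wasm SIMD toolchain, e.g. clang --target=wasm32 -msimd128); the helper name is illustrative, not XNNPACK API:

#include <wasm_simd128.h>

// Force lanes of v to +0.0f wherever the corresponding lane of x is <= cutoff;
// all other lanes pass through unchanged.
v128_t zero_lanes_at_or_below(v128_t v, v128_t x, float cutoff) {
  const v128_t vm = wasm_f32x4_le(x, wasm_f32x4_splat(cutoff));
  return wasm_v128_andnot(v, vm);
}
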
diff --git a/src/math/expm1minus-wasmsimd-rr2-p6-max.c b/src/math/expm1minus-wasmsimd-rr2-p6-max.c
index 4ee9439..59a4198 100644
--- a/src/math/expm1minus-wasmsimd-rr2-p6-max.c
+++ b/src/math/expm1minus-wasmsimd-rr2-p6-max.c
@@ -42,7 +42,7 @@
     // The function saturates at -1 for large negative inputs: expm1f(x) == -1.0f for x <= sat_cutoff ~= -17.328680.
     // To guarantee this behaviour, we clip input at sat_cutoff, and leverage the fact that for our implementation
     // expm1f(sat_cutoff) == -1.0f. NaN inputs are passed unchanged.
-    vx = wasm_f32x4_max(vsat_cutoff, vx);
+    vx = wasm_f32x4_max(vx, vsat_cutoff);
 
     // Compute reduced argument n := round(x / log(2)).
     // We do it by adding a large number (magic bias), which causes rounding of the result to integer, then subtracting
diff --git a/src/math/sigmoid-neon-frac-p9-p10-nr1recps.c b/src/math/sigmoid-neon-frac-p9-p10-nr1recps.c
deleted file mode 100644
index a0dc091..0000000
--- a/src/math/sigmoid-neon-frac-p9-p10-nr1recps.c
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-
-#include <arm_neon.h>
-
-#include <xnnpack/common.h>
-#include <xnnpack/vunary.h>
-
-
-void xnn_math_f32_sigmoid__neon_frac_p9_p10_nr1recps(
-    size_t n,
-    const float* input,
-    float* output)
-{
-  assert(n % (4 * sizeof(float)) == 0);
-
-  const float32x4_t vhalf = vmovq_n_f32(0.5f);
-
-  // The coefficients of the numerator polynomial (odd).
-  const float32x4_t valpha_1 = vmovq_n_f32(2.48287947061529e-01);
-  const float32x4_t valpha_3 = vmovq_n_f32(8.51377133304701e-03);
-  const float32x4_t valpha_5 = vmovq_n_f32(6.08574864600143e-05);
-  const float32x4_t valpha_7 = vmovq_n_f32(1.15627324459942e-07);
-  const float32x4_t valpha_9 = vmovq_n_f32(4.37031012579801e-11);
-
-  // The coefficients of the denominator polynomial (even).
-  const float32x4_t vbeta_0 =  vmovq_n_f32(9.93151921023180e-01);
-  const float32x4_t vbeta_2 =  vmovq_n_f32(1.16817656904453e-01);
-  const float32x4_t vbeta_4 =  vmovq_n_f32(1.70198817374094e-03);
-  const float32x4_t vbeta_6 =  vmovq_n_f32(6.29106785017040e-06);
-  const float32x4_t vbeta_8 =  vmovq_n_f32(5.76102136993427e-09);
-  const float32x4_t vbeta_10 = vmovq_n_f32(6.10247389755681e-13);
-
-  // Sigmoid ~saturates outside of this range anyway.
-  const float32x4_t vsigmoid_maxinput = vdupq_n_f32(18.f);
-  const float32x4_t vsigmoid_mininput = vdupq_n_f32(-18.f);
-
-  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
-    float32x4_t vn = vld1q_f32(input); input += 4;
-
-    vn = vminq_f32(vn, vsigmoid_maxinput);
-    vn = vmaxq_f32(vn, vsigmoid_mininput);
-
-    const float32x4_t vn_sq = vmulq_f32(vn, vn);
-
-    // Evaluate numerator polynomial
-    float32x4_t vnum = vmlaq_f32(valpha_7, vn_sq, valpha_9);
-
-    vnum = vmlaq_f32(valpha_5, vn_sq, vnum);
-    vnum = vmlaq_f32(valpha_3, vn_sq, vnum);
-    vnum = vmlaq_f32(valpha_1, vn_sq, vnum);
-    vnum = vmulq_f32(vn, vnum);
-
-    // Evaluate denominator polynomial
-
-    float32x4_t vdenom = vmlaq_f32(vbeta_8, vn_sq, vbeta_10);
-    vdenom = vmlaq_f32(vbeta_6, vn_sq, vdenom);
-    vdenom = vmlaq_f32(vbeta_4, vn_sq, vdenom);
-    vdenom = vmlaq_f32(vbeta_2, vn_sq, vdenom);
-    vdenom = vmlaq_f32(vbeta_0, vn_sq, vdenom);
-
-    // Do division, one NR iteration
-
-    float32x4_t vrecp = vrecpeq_f32(vdenom);
-    vrecp = vmulq_f32(vrecp, vrecpsq_f32(vrecp, vdenom));
-
-    const float32x4_t vsigmoid = vmlaq_f32(vhalf, vnum, vrecp);
-
-    vst1q_f32(output, vsigmoid); output += 4;
-  }
-}
diff --git a/src/math/sigmoid-neon-rr1-lut2048-p1-nr2recps.c b/src/math/sigmoid-neon-rr1-lut2048-p1-nr2recps.c
deleted file mode 100644
index 73bb6e7..0000000
--- a/src/math/sigmoid-neon-rr1-lut2048-p1-nr2recps.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <arm_neon.h>
-
-#include <xnnpack/common.h>
-#include <xnnpack/math-stubs.h>
-
-
-// Table of exp2(k / 2048) values decremented (as integer) by (k << 12), k = 0..2048
-extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
-
-void xnn_math_f32_sigmoid__neon_rr1_lut2048_p1_nr2recps(
-    size_t n,
-    const float* input,
-    float* output)
-{
-  assert(n % (4 * sizeof(float)) == 0);
-
-  // Large number such that ulp(magic bias) == exp2(-11)
-  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p12f);
-  const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p0f);
-  // Mask for the lowest 11 bits
-  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
-  const float32x4_t vln2 = vmovq_n_f32(0x1.62E43p-1f);
-  // Coefficient of polynomial approximation of exp(-t) ~ 1 + t * c1 on [-log(2)/2048, log(2)/2048]
-  const float32x4_t vc1 = vmovq_n_f32(-0x1.FFFFFEp-1f);
-  const float32x4_t vone = vmovq_n_f32(1.0f);
-  // The largest z for which sigmoidf(-z) is normalized.
-  // This number is also the largest z for which expf(-z) is normalized.
-  const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep+6f);
-
-  for (; n != 0; n -= 4 * sizeof(float)) {
-    const float32x4_t vx = vld1q_f32(input); input += 4;
-
-    // General structure of the algorithm:
-    //
-    //           / exp(x) / (1 + exp(x)) if x <= 0
-    //   f[x] :=
-    //           \ 1 - f[-x] if x >= 0
-    //
-    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
-    // then replace result with 1 - f[-z] if x >= 0.
-    const float32x4_t vz = vabsq_f32(vx);
-
-    // Compute reduced argument n := round(-z / log(2), 11).
-    // We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
-    // the large number back. The trick with adding large number is valid only within certain bounds
-    // (|-z / log(2)| <= 2**11, i.e. |z| <= 0x1.62E43p+10 = 1419.5654296875), but that is acceptable, because inputs x
-    // outside of [-87.336544, 17.328678] (i.e. z outsize [0, 87.336544]) underflow or saturate sigmoidf(x). We fixup
-    // the result for such inputs at the very end of the algorithm.
-    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
-
-    // Create a floating-point number s (scale) such that s := 2**n for such inputs that sigmoidf(-z) is normalized,
-    // i.e. 0 <= z <= 87.33642. As n has 11 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s
-    // in two steps:
-    // 1. Fetch 2**frac(n) from the table using the 11 low bits of n, as integer. Note that the fetched values are in
-    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
-    // 2. Adjust fecthed value by addition of int(n) to its floating-point exponent. The result is always a normalized
-    //    number, because for 0 <= z <= 87.33642 (inputs for which sigmoidf(z) is normalized) we have
-    //    -126 <= int(n) <= 0, and thus the adjusted exponent is not lower than -126.
-    //
-    // Shift bits 11:19 into 23:31 (position of floating-point exponent).
-    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
-
-    // Use bits 0:11 of n, as integer, as an index for table lookup of l := 2**frac(n).
-    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
-    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
-    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
-    float32x2_t vl_lo = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_2048 + (uint32_t) vidx_lo));
-    float32x2_t vl_hi = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_2048 + (uint32_t) vidx_hi));
-    vl_lo = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_2048 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
-    vl_hi = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_2048 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
-    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
-    // Adjust exponent of the value l fetched from the table to get the final s value.
-    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
-
-    // Subtract the large number back to get the final n := round(-z / log(2), 11) as a floating-point number.
-    vn = vsubq_f32(vn, vmagic_bias);
-
-    // Compute reduced argument t := (z + n * log(2)). Note that -t = -z - n * log(2).
-    float32x4_t vt = vmlaq_f32(vz, vn, vln2);
-
-    // Compute degree-1 polynomial approximation for exp(-t) on [-log(2)/2048, log(2)/2048]:
-    //   P(t) = 1 + t * c1 = 1 + p
-    const float32x4_t vp = vmulq_f32(vt, vc1);
-
-    // Reconstruct the exp(-z) value:
-    //   e = s * (1 + t * c1)
-    //     = s * (1 + p)
-    //     = s + s * p
-    const float32x4_t vy = vmlaq_f32(vs, vs, vp);
-
-    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
-    const float32x4_t vd = vaddq_f32(vy, vone);
-
-    // Use Newton-Raphson method (2 iterations) to compute reciprocal of denominator.
-    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
-    // Thus the reciprocal of the denominator never overflows.
-    float32x4_t vr = vrecpeq_f32(vd);
-    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
-    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
-
-    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
-    float32x4_t vf = vmulq_f32(vy, vr);
-
-    // For inputs below denormal cutoff, replace output with +0.0f.
-    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
-    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
-
-    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
-    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
-    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
-
-    vst1q_f32(output, vf); output += 4;
-  }
-}
diff --git a/src/math/sigmoid-neon-rr1-lut64-p2-nr2recps.c b/src/math/sigmoid-neon-rr1-lut64-p2-nr2recps.c
deleted file mode 100644
index 8a97290..0000000
--- a/src/math/sigmoid-neon-rr1-lut64-p2-nr2recps.c
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <arm_neon.h>
-
-#include <xnnpack/common.h>
-#include <xnnpack/math-stubs.h>
-
-
-// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
-extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
-
-void xnn_math_f32_sigmoid__neon_rr1_lut64_p2_nr2recps(
-    size_t n,
-    const float* input,
-    float* output)
-{
-  assert(n % (4 * sizeof(float)) == 0);
-
-  // Large number such that ulp(magic bias) == exp2(-6)
-  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p17f);
-  const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p0f);
-  // Mask for the lowest 6 bits
-  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
-  const float32x4_t vln2 = vmovq_n_f32(0x1.62E43p-1f);
-  // Coefficient of polynomial approximation of exp(-t) ~ 1 + t * (1 + t * c2) on [-log(2)/128, log(2)/128]
-  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFF0Ap-2f);
-  const float32x4_t vone = vmovq_n_f32(1.0f);
-  // The largest z for which sigmoidf(-z) is normalized.
-  // This number is also the largest z for which expf(-z) is normalized.
-  const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep+6f);
-
-  for (; n != 0; n -= 4 * sizeof(float)) {
-    const float32x4_t vx = vld1q_f32(input); input += 4;
-
-    // General structure of the algorithm:
-    //
-    //           / exp(x) / (1 + exp(x)) if x <= 0
-    //   f[x] :=
-    //           \ 1 - f[-x] if x >= 0
-    //
-    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
-    // then replace result with 1 - f[-z] if x >= 0.
-    const float32x4_t vz = vabsq_f32(vx);
-
-    // Compute reduced argument n := round(-z / log(2), 6).
-    // We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
-    // the large number back. The trick with adding large number is valid only within certain bounds
-    // (|-z / log(2)| <= 2**16, i.e. |z| <= 0x1.62E43p+15 = 5814540.0), but that is acceptable, because inputs x
-    // outside of [-87.336544, 17.328678] (i.e. z outsize [0, 87.336544]) underflow or saturate sigmoidf(x). We fixup
-    // the result for such inputs at the very end of the algorithm.
-    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
-
-    // Create a floating-point number s (scale) such that s := 2**n for such inputs that sigmoidf(-z) is normalized,
-    // i.e. 0 <= z <= 87.33642. As n has 6 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s
-    // in two steps:
-    // 1. Fetch 2**frac(n) from the table using the 6 low bits of n, as integer. Note that the fetched values are in
-    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
-    // 2. Adjust fecthed value by addition of int(n) to its floating-point exponent. The result is always a normalized
-    //    number, because for 0 <= z <= 87.33642 (inputs for which sigmoidf(z) is normalized) we have
-    //    -126 <= int(n) <= 0, and thus the adjusted exponent is not lower than -126.
-    //
-    // Shift bits 6:14 into 23:31 (position of floating-point exponent).
-    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
-
-    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**frac(n).
-    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
-    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
-    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
-    float32x2_t vl_lo = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo));
-    float32x2_t vl_hi = vld1_dup_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi));
-    vl_lo = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
-    vl_hi = vld1_lane_f32((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
-    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
-    // Adjust exponent of the value l fetched from the table to get the final s value.
-    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
-
-    // Subtract the large number back to get the final n := round(-z / log(2), 6) as a floating-point number.
-    vn = vsubq_f32(vn, vmagic_bias);
-
-    // Compute reduced argument t := (z + n * log(2)). Note that -t = -z - n * log(2).
-    float32x4_t vt = vmlaq_f32(vz, vn, vln2);
-
-    // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/128, log(2)/128].
-    //   P(t) = 1 + t * (-1 + t * c2) = 1 - (t - t * (t * c2)) = 1 - p
-    float32x4_t vp = vmulq_f32(vt, vc2);
-    vp = vmlsq_f32(vt, vp, vt);
-
-    // Reconstruct the exp(-z) value:
-    //   e = s * (1 + t * (-1 + t * c2))
-    //     = s * (1 - p)
-    //     = s - s * p
-    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
-
-    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
-    const float32x4_t vd = vaddq_f32(vy, vone);
-
-    // Use Newton-Raphson method (2 iterations) to compute reciprocal of denominator.
-    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
-    // Thus the reciprocal of the denominator never overflows.
-    float32x4_t vr = vrecpeq_f32(vd);
-    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
-    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
-
-    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
-    float32x4_t vf = vmulq_f32(vy, vr);
-
-    // For inputs below denormal cutoff, replace output with +0.0f.
-    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
-    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
-
-    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
-    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
-    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
-
-    vst1q_f32(output, vf); output += 4;
-  }
-}
diff --git a/src/math/sigmoid-neon-rr1-p5-nr2recps.c b/src/math/sigmoid-neon-rr1-p5-nr2recps.c
deleted file mode 100644
index 3ba2893..0000000
--- a/src/math/sigmoid-neon-rr1-p5-nr2recps.c
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <arm_neon.h>
-
-#include <xnnpack/math-stubs.h>
-
-
-void xnn_math_f32_sigmoid__neon_rr1_p5_nr2recps(
-    size_t n,
-    const float* input,
-    float* output)
-{
-  assert(n % (4 * sizeof(float)) == 0);
-
-  // Large number such that ulp(magic bias) == 1 and magic bias === 127 mod 2**22.
-  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
-  const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p+0f);
-  const float32x4_t vln2 = vmovq_n_f32(0x1.62E43p-1f);
-  // Coefficient of polynomial approximation of
-  // exp(-t) ~ 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))) on [-log(2)/2, log(2)/2]
-  const float32x4_t vc5 = vmovq_n_f32(-0x1.0F9F9Cp-7f);
-  const float32x4_t vc4 = vmovq_n_f32(0x1.573A1Ap-5f);
-  const float32x4_t vc3 = vmovq_n_f32(-0x1.555A80p-3f);
-  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFDC6p-2f);
-  const float32x4_t vc1 = vmovq_n_f32(-0x1.FFFFF6p-1f);
-  const float32x4_t vone = vmovq_n_f32(1.0f);
-  // The largest z for which sigmoidf(-z) is normalized.
-  // This number is also the largest z for which expf(-z) is normalized.
-  const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep+6f);
-
-  for (; n != 0; n -= 4 * sizeof(float)) {
-    const float32x4_t vx = vld1q_f32(input); input += 4;
-
-    // General structure of the algorithm:
-    //           / exp(x) / (1 + exp(x)) if x <= 0
-    //   f[x] :=
-    //           \ 1 - f[-x] if x >= 0
-    //
-    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
-    // then replace result with 1 - f[-z] if x >= 0.
-    const float32x4_t vz = vabsq_f32(vx);
-
-    // Compute reduced argument n := round(-z / log(2)).
-    // We do it by adding a large number (magic bias), which cause rounding of the result to integer, then subtracing
-    // the large number back. The trick with adding large number is valid only within certain bounds
-    // (|-z / log(2)| <= 2**22, i.e. |z| <= 0x1.62E43p+22 = 5814540.0), but that is acceptable, because inputs x
-    // outside of [-87.336544, 17.328678] (i.e. z outsize [0, 87.336544]) underflow or saturate sigmoidf(x). We fixup
-    // the result for such inputs at the very end of the algorithm.
-    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
-
-    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
-    // -87.336544 <= -z <= 0.0, and -126 <= n <= 0 accordingly.
-    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
-
-    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
-    vn = vsubq_f32(vn, vmagic_bias);
-
-    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
-    float32x4_t vt = vmlaq_f32(vz, vn, vln2);
-
-    // Compute degree-5 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
-    //   P5(t) = 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
-    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
-    vp = vmlaq_f32(vc3, vp, vt);
-    vp = vmlaq_f32(vc2, vp, vt);
-    vp = vmlaq_f32(vc1, vp, vt);
-
-    // Reconstruct the exp(-z) value:
-    //   e = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
-    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
-    //     = s + (t * s) * p
-    vt = vmulq_f32(vt, vs);
-    float32x4_t ve = vmlaq_f32(vs, vp, vt);
-
-    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
-    float32x4_t vd = vaddq_f32(ve, vone);
-
-    // Use Newton-Raphson method (2 iterations) to compute reciprocal of denominator.
-    // Note: 1 < d <= 2, because z >= 0.0 and 0 < exp(-z) <= 1.0.
-    // Thus the reciprocal of the denominator never overflows.
-    float32x4_t vr = vrecpeq_f32(vd);
-    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
-    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
-
-    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
-    float32x4_t vf = vmulq_f32(ve, vr);
-
-    // For inputs below denormal cutoff, replace output with +0.0f.
-    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
-    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
-
-    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
-    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
-    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
-
-    vst1q_f32(output, vf); output += 4;
-  }
-}
diff --git a/src/operator-run.c b/src/operator-run.c
index 224b874..1dc06fb 100644
--- a/src/operator-run.c
+++ b/src/operator-run.c
@@ -734,12 +734,8 @@
 
 void xnn_compute_pad_5d(
     const struct pad_context context[restrict XNN_MIN_ELEMENTS(1)],
-    size_t i, size_t j, size_t k, size_t l, size_t m,
-    size_t l_range, size_t m_range)
+    size_t i, size_t j, size_t k, size_t l, size_t m)
 {
-  assert(l_range == 1);
-  assert(m_range == 1);
-
   const void* input = (const void*) ((uintptr_t) context->input +
     i * context->input_stride[4] + j * context->input_stride[3] + k * context->input_stride[2] + l * context->input_stride[1] + m * context->input_stride[0]);
   void* output = (void*) ((uintptr_t) context->output +
@@ -772,12 +768,8 @@
 
 void xnn_compute_elementwise_binary_5d(
     const struct elementwise_binary_context context[restrict XNN_MIN_ELEMENTS(1)],
-    size_t i, size_t j, size_t k, size_t l, size_t m,
-    size_t l_range, size_t m_range)
+    size_t i, size_t j, size_t k, size_t l, size_t m)
 {
-  assert(l_range == 1);
-  assert(m_range == 1);
-
   const void* a = (const void*) ((uintptr_t) context->a +
     i * context->a_stride[0] + j * context->a_stride[1] + k * context->a_stride[2] + l * context->a_stride[3] + m * context->a_stride[4]);
   const void* b = (const void*) ((uintptr_t) context->b +
@@ -1192,6 +1184,19 @@
           op->compute.tile[0], op->compute.tile[1],
           PTHREADPOOL_FLAG_DISABLE_DENORMALS /* flags */);
       break;
+    case xnn_parallelization_type_5d:
+      assert(op->compute.range[0] != 0);
+      assert(op->compute.range[1] != 0);
+      assert(op->compute.range[2] != 0);
+      assert(op->compute.range[3] != 0);
+      assert(op->compute.range[4] != 0);
+      pthreadpool_parallelize_5d(
+          threadpool,
+          op->compute.task_5d,
+          &op->context,
+          op->compute.range[0], op->compute.range[1], op->compute.range[2], op->compute.range[3], op->compute.range[4],
+          PTHREADPOOL_FLAG_DISABLE_DENORMALS /* flags */);
+      break;
     case xnn_parallelization_type_5d_tile_2d:
       assert(op->compute.range[0] != 0);
       assert(op->compute.range[1] != 0);
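
The new xnn_parallelization_type_5d case pairs with the slimmed-down 5-D task signatures above: pthreadpool now invokes the task once per (i, j, k, l, m) index, so the l_range/m_range tile arguments that the old 5d_tile_2d path passed (and asserted to be 1) disappear. A standalone sketch of that calling convention, not XNNPACK code, assuming only the pthreadpool API already used above:

#include <pthreadpool.h>
#include <stdio.h>

// Matches pthreadpool_task_5d_t: one call per 5-D index, no tile ranges.
static void print_index_5d(void* context, size_t i, size_t j, size_t k, size_t l, size_t m) {
  (void) context;
  printf("(%zu, %zu, %zu, %zu, %zu)\n", i, j, k, l, m);
}

int main(void) {
  pthreadpool_t threadpool = pthreadpool_create(0 /* one thread per core */);
  pthreadpool_parallelize_5d(
      threadpool, print_index_5d, NULL /* context */,
      2, 2, 1, 1, 3 /* range in each dimension */,
      0 /* flags */);
  pthreadpool_destroy(threadpool);
  return 0;
}
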
diff --git a/src/operator-strings.c b/src/operator-strings.c
index 3c41ff3..1af6157 100644
--- a/src/operator-strings.c
+++ b/src/operator-strings.c
@@ -68,6 +68,8 @@
       return "Depth To Space (NHWC, X32)";
     case xnn_operator_type_divide_nd_f32:
       return "Divide (ND, F32)";
+    case xnn_operator_type_elu_nc_f32:
+      return "ELU (NC, F32)";
     case xnn_operator_type_floor_nc_f32:
       return "Floor (NC, F32)";
     case xnn_operator_type_fully_connected_nc_f32:
diff --git a/src/operators/binary-elementwise-nd.c b/src/operators/binary-elementwise-nd.c
index 06f8d03..54908cb 100644
--- a/src/operators/binary-elementwise-nd.c
+++ b/src/operators/binary-elementwise-nd.c
@@ -552,8 +552,8 @@
     y_stride *= compressed_output_shape[i];
   }
 
-  binary_elementwise_op->compute.type = xnn_parallelization_type_5d_tile_2d;
-  binary_elementwise_op->compute.task_5d_tile_2d = (pthreadpool_task_5d_tile_2d_t) xnn_compute_elementwise_binary_5d;
+  binary_elementwise_op->compute.type = xnn_parallelization_type_5d;
+  binary_elementwise_op->compute.task_5d = (pthreadpool_task_5d_t) xnn_compute_elementwise_binary_5d;
   binary_elementwise_op->compute.range[0] = compressed_output_shape[5];
   binary_elementwise_op->compute.range[1] = compressed_output_shape[4];
   binary_elementwise_op->compute.range[2] = compressed_output_shape[3];
diff --git a/src/operators/constant-pad-nd.c b/src/operators/constant-pad-nd.c
index 452fba5..7c7271f 100644
--- a/src/operators/constant-pad-nd.c
+++ b/src/operators/constant-pad-nd.c
@@ -171,15 +171,13 @@
   constant_pad_op->context.pad.post_paddings[0] =
     constant_pad_op->context.pad.output_size[0] - constant_pad_op->context.pad.pre_paddings[0] - constant_pad_op->context.pad.input_size[0];
 
-  constant_pad_op->compute.type = xnn_parallelization_type_5d_tile_2d;
-  constant_pad_op->compute.task_5d_tile_2d = (pthreadpool_task_5d_tile_2d_t) xnn_compute_pad_5d;
+  constant_pad_op->compute.type = xnn_parallelization_type_5d;
+  constant_pad_op->compute.task_5d = (pthreadpool_task_5d_t) xnn_compute_pad_5d;
   constant_pad_op->compute.range[0] = normalized_output_shape[0];
   constant_pad_op->compute.range[1] = normalized_output_shape[1];
   constant_pad_op->compute.range[2] = normalized_output_shape[2];
   constant_pad_op->compute.range[3] = normalized_output_shape[3];
   constant_pad_op->compute.range[4] = normalized_output_shape[4];
-  constant_pad_op->compute.tile[0] = 1;
-  constant_pad_op->compute.tile[1] = 1;
   constant_pad_op->state = xnn_run_state_ready;
 
   return xnn_status_success;
diff --git a/src/operators/convolution-nchw.c b/src/operators/convolution-nchw.c
index a8f7153..bad04b7 100644
--- a/src/operators/convolution-nchw.c
+++ b/src/operators/convolution-nchw.c
@@ -175,34 +175,34 @@
   const bool is_3x3 = kernel_width == 3 && kernel_height == 3 && dilation_height == 1 && dilation_width == 1;
   const bool is_5x5 = kernel_width == 5 && kernel_height == 5 && dilation_height == 1 && dilation_width == 1;
   const bool nhwc_input = (flags & XNN_FLAG_INPUT_NHWC) != 0;
-  if (is_1x1 && !any_padding && !nhwc_input && groups == 1 && xnn_params.f32.spmm.ukernel != NULL) {
+  if (is_1x1 && !any_padding && !nhwc_input && groups == 1) {
     ukernel_type = xnn_ukernel_type_spmm;
   } else if (is_3x3 && subsampling_height == 2 && subsampling_width == 2 &&
     input_padding_top == 1 && input_padding_left == 1 && input_padding_bottom == 1 && input_padding_right == 1 &&
-    nhwc_input && groups == 1 && xnn_params.f32.conv_hwc2chw_3x3c3s2.ukernel_with_symm_padding != NULL)
+    nhwc_input && groups == 1)
   {
     ukernel_type = xnn_ukernel_type_conv2d_hwc2chw;
   } else if (is_3x3 && subsampling_height == 1 && subsampling_width == 1 &&
     input_padding_top == 1 && input_padding_left == 1 && input_padding_bottom == 1 && input_padding_right == 1 &&
-    !nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.dwconv2d_chw_3x3.ukernel != NULL)
+    !nhwc_input && group_input_channels == 1 && group_output_channels == 1)
   {
     ukernel_type = xnn_ukernel_type_dwconv;
     dwconv2d_parameters = &xnn_params.f32.dwconv2d_chw_3x3;
   } else if (is_3x3 && subsampling_height == 2 && subsampling_width == 2 &&
     (input_padding_top == 0 || input_padding_top == 1) && input_padding_left == 1 && input_padding_bottom == 1 && input_padding_right == 1 &&
-    !nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.dwconv2d_chw_3x3s2.ukernel != NULL)
+    !nhwc_input && group_input_channels == 1 && group_output_channels == 1)
   {
     ukernel_type = xnn_ukernel_type_dwconv;
     dwconv2d_parameters = &xnn_params.f32.dwconv2d_chw_3x3s2;
   } else if (is_5x5 && subsampling_height == 1 && subsampling_width == 1 &&
     input_padding_top == 2 && input_padding_left == 2 && input_padding_bottom == 2 && input_padding_right == 2 &&
-    !nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.dwconv2d_chw_5x5.ukernel != NULL)
+    !nhwc_input && group_input_channels == 1 && group_output_channels == 1)
   {
     ukernel_type = xnn_ukernel_type_dwconv;
     dwconv2d_parameters = &xnn_params.f32.dwconv2d_chw_5x5;
   } else if (is_5x5 && subsampling_height == 2 && subsampling_width == 2 &&
     (input_padding_top == 1 || input_padding_top == 2) && input_padding_left == 2 && input_padding_bottom == 2 && input_padding_right == 2 &&
-    !nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.dwconv2d_chw_5x5s2.ukernel != NULL)
+    !nhwc_input && group_input_channels == 1 && group_output_channels == 1)
   {
     ukernel_type = xnn_ukernel_type_dwconv;
     dwconv2d_parameters = &xnn_params.f32.dwconv2d_chw_5x5s2;
@@ -465,7 +465,6 @@
 
       convolution_op->ukernel.dwconv2d = (struct xnn_ukernel_dwconv2d) {
         .chw_function = dwconv2d_parameters->ukernel,
-        .input_width_tile = dwconv2d_parameters->input_width_tile,
         .output_width_tile = dwconv2d_parameters->output_width_tile,
       };
 
diff --git a/src/operators/global-average-pooling-ncw.c b/src/operators/global-average-pooling-ncw.c
index 343d45b..359680e 100644
--- a/src/operators/global-average-pooling-ncw.c
+++ b/src/operators/global-average-pooling-ncw.c
@@ -63,14 +63,6 @@
     goto error;
   }
 
-  status = xnn_status_unsupported_parameter;
-  if (xnn_params.f32.gavgpool_cw.ukernel == NULL) {
-    xnn_log_error(
-      "failed to create %s operator: only selected configurations parameters are supported",
-      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32));
-    goto error;
-  }
-
   status = xnn_status_out_of_memory;
 
   global_average_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
diff --git a/src/operators/unary-elementwise-nc.c b/src/operators/unary-elementwise-nc.c
index 472f4d6..914b788 100644
--- a/src/operators/unary-elementwise-nc.c
+++ b/src/operators/unary-elementwise-nc.c
@@ -278,6 +278,30 @@
     copy_op_out);
 }
 
+enum xnn_status xnn_create_elu_nc_f32(
+  size_t channels,
+  size_t input_stride,
+  size_t output_stride,
+  float alpha,
+  uint32_t flags,
+  xnn_operator_t* elu_op_out)
+{
+  if (alpha <= 0.0f || !isnormal(alpha)) {
+    xnn_log_error(
+      "failed to create %s operator with %.7g alpha parameter: alpha must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_elu_nc_f32), alpha);
+    return xnn_status_invalid_parameter;
+  }
+
+  const union xnn_f32_elu_params params = xnn_init_f32_elu_params(1.0f /* prescale */, alpha, 1.0f /* beta */);
+  return create_unary_elementwise_nc(
+    channels, input_stride, output_stride, flags,
+    &params, sizeof(params),
+    xnn_operator_type_elu_nc_f32,
+    xnn_params.f32.elu,
+    elu_op_out);
+}
+
 enum xnn_status xnn_create_floor_nc_f32(
     size_t channels,
     size_t input_stride,
@@ -573,6 +597,28 @@
     NULL, 0);
 }
 
+enum xnn_status xnn_setup_elu_nc_f32(
+    xnn_operator_t elu_op,
+    size_t batch_size,
+    const float* input,
+    float* output,
+    pthreadpool_t threadpool)
+{
+  if (elu_op->type != xnn_operator_type_elu_nc_f32) {
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_elu_nc_f32),
+      xnn_operator_type_to_string(elu_op->type));
+    return xnn_status_invalid_parameter;
+  }
+  elu_op->state = xnn_run_state_invalid;
+
+  return setup_unary_elementwise_nc(
+    elu_op,
+    batch_size, input, output,
+    2 /* log2(sizeof(float)) */,
+    &elu_op->params.f32_elu, sizeof(elu_op->params.f32_elu));
+}
+
 enum xnn_status xnn_setup_floor_nc_f32(
     xnn_operator_t floor_op,
     size_t batch_size,
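
A minimal sketch of driving the new ELU operator declared above, assuming the usual XNNPACK operator lifecycle (xnn_initialize / xnn_run_operator / xnn_delete_operator), single-threaded execution, and a contiguous NC layout where both strides equal the channel count:

#include <stddef.h>
#include <stdio.h>
#include <xnnpack.h>

int main(void) {
  if (xnn_initialize(NULL /* allocator */) != xnn_status_success) return 1;

  // Hypothetical shapes for illustration: batch of 4 rows, 8 channels each.
  float input[4 * 8], output[4 * 8];
  for (size_t i = 0; i < 4 * 8; i++) input[i] = (float) i - 16.0f;

  xnn_operator_t elu_op = NULL;
  if (xnn_create_elu_nc_f32(8 /* channels */, 8 /* input stride */, 8 /* output stride */,
                            1.0f /* alpha */, 0 /* flags */, &elu_op) != xnn_status_success) return 1;
  if (xnn_setup_elu_nc_f32(elu_op, 4 /* batch size */, input, output,
                           NULL /* threadpool */) != xnn_status_success) return 1;
  if (xnn_run_operator(elu_op, NULL /* threadpool */) != xnn_status_success) return 1;

  printf("ELU(%f) = %f\n", input[0], output[0]);  // negative inputs map to alpha * (exp(x) - 1)
  return xnn_delete_operator(elu_op) == xnn_status_success ? 0 : 1;
}
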
diff --git a/src/qs8-requantization/fp32-psimd.c b/src/qs8-requantization/fp32-psimd.c
deleted file mode 100644
index 21958a1..0000000
--- a/src/qs8-requantization/fp32-psimd.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-// All rights reserved.
-//
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <stdint.h>
-#include <stddef.h>
-
-#include <psimd.h>
-
-#include <xnnpack/requantization-stubs.h>
-
-
-void xnn_qs8_requantize_fp32__psimd(
-    size_t n,
-    const int32_t* input,
-    float scale,
-    int8_t zero_point,
-    int8_t qmin,
-    int8_t qmax,
-    int8_t* output)
-{
-  assert(n % 16 == 0);
-  assert(scale < 1.0f);
-  assert(scale >= 0x1.0p-32f);
-
-  const psimd_f32 vscale = psimd_splat_f32(scale);
-  const psimd_f32 vfmin = psimd_splat_f32((float) ((int32_t) qmin - (int32_t) zero_point));
-  const psimd_f32 vfmax = psimd_splat_f32((float) ((int32_t) qmax - (int32_t) zero_point));
-  const psimd_f32 vfmagic = psimd_splat_f32(12582912.0f);
-  const psimd_s32 vimagic = psimd_splat_s32(INT32_C(0x4B400000) - (int32_t) zero_point);
-  for (; n != 0; n -= 16) {
-    const psimd_s32 x = psimd_load_s32(input);
-    const psimd_s32 y = psimd_load_s32(input + 4);
-    const psimd_s32 z = psimd_load_s32(input + 8);
-    const psimd_s32 w = psimd_load_s32(input + 12);
-    input += 16;
-
-    // Convert int32_t input to FP32 and multiply by FP32 scale.
-    // Both operations involve roundings:
-    // - Large int32_t values can't be exactly represented as FP32. We expect that conversion instruction would
-    //   round it to nearest FP32 value with ties to even, but Clang documentation for __builtin_convertvector does
-    //   not guaratee that.
-    // - Product of two FP32 values is generally not exactly representation as an FP32 value, and will be rounded
-    //   to nearest FP32 value with ties to even.
-    const psimd_f32 x_scaled = psimd_cvt_s32_f32(x) * vscale;
-    const psimd_f32 y_scaled = psimd_cvt_s32_f32(y) * vscale;
-    const psimd_f32 z_scaled = psimd_cvt_s32_f32(z) * vscale;
-    const psimd_f32 w_scaled = psimd_cvt_s32_f32(w) * vscale;
-
-    // Clang/gcc vector extension does not provide an intrinsics for a floating-point to integer conversion
-    // operation with rounding-to-nearest-even. In lieu of such intrinsic, we use a magic trick of adding a large
-    // number (1.5 * 2**23) to scaled value to cause rounding to integer, and then substracing this magic number as
-    // integer. This trick works only in a limited range (absolute value of input must be less than 2**22), so
-    // generally we have to clamp input to this range before using the magic. However, clamping to any smaller range
-    // works just as well, and thus we clamp to [qmin - zero point, qmax - zero point] range so that after we add
-    // zero point to the result, it gets into target [qmin, qmax] range.
-    const psimd_f32 x_clamped = psimd_min_f32(psimd_max_f32(x_scaled, vfmin), vfmax);
-    const psimd_f32 y_clamped = psimd_min_f32(psimd_max_f32(y_scaled, vfmin), vfmax);
-    const psimd_f32 z_clamped = psimd_min_f32(psimd_max_f32(z_scaled, vfmin), vfmax);
-    const psimd_f32 w_clamped = psimd_min_f32(psimd_max_f32(w_scaled, vfmin), vfmax);
-
-    // Conversion to integer using the "magic trick". Rounding is performed in the output of addition operation,
-    // and result is rounded to nearest even integer with ties to even.
-    const psimd_s32 x_biased = (psimd_s32) (x_clamped + vfmagic) - vimagic;
-    const psimd_s32 y_biased = (psimd_s32) (y_clamped + vfmagic) - vimagic;
-    const psimd_s32 z_biased = (psimd_s32) (z_clamped + vfmagic) - vimagic;
-    const psimd_s32 w_biased = (psimd_s32) (w_clamped + vfmagic) - vimagic;
-
-    // Select low 8 bits of each 32-bit integer in the vectors for the output.
-    // Since result is already clamped to [qmin, qmax] subrange of [0, 255], saturation is not needed.
-    const psimd_s16 xy_packed = psimd_concat_even_s16((psimd_s16) x_biased, (psimd_s16) y_biased);
-    const psimd_s16 zw_packed = psimd_concat_even_s16((psimd_s16) z_biased, (psimd_s16) w_biased);
-
-    const psimd_s8 xyzw_packed = psimd_concat_even_s8((psimd_s8) xy_packed, (psimd_s8) zw_packed);
-
-    psimd_store_s8(output, xyzw_packed);
-    output += 16;
-  }
-}
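
The comments in the kernel removed above describe the float-to-integer rounding trick it relied on; a scalar restatement of that trick (an illustration only, not part of the library) for values already clamped into a safe range:

#include <assert.h>
#include <stdint.h>
#include <string.h>

// Adding 1.5 * 2**23 forces rounding to the nearest integer (ties to even) in the
// float domain; the integer result is then recovered by subtracting the same magic
// constant reinterpreted as int32_t. Valid only for |x| < 2**22, hence the clamping.
static int32_t round_via_magic(float x, float fmin, float fmax) {
  assert(fmin >= -0x1.0p+22f && fmax <= 0x1.0p+22f);
  const float clamped = x < fmin ? fmin : (x > fmax ? fmax : x);
  const float biased = clamped + 12582912.0f;  // 1.5 * 2**23, bit pattern 0x4B400000
  int32_t bits;
  memcpy(&bits, &biased, sizeof(bits));        // reinterpret float as int32_t
  return bits - INT32_C(0x4B400000);           // subtract the magic as an integer
}
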
diff --git a/src/qs8-requantization/precise-psimd.c b/src/qs8-requantization/precise-psimd.c
deleted file mode 100644
index 66bba39..0000000
--- a/src/qs8-requantization/precise-psimd.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-// All rights reserved.
-//
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <stdint.h>
-#include <stddef.h>
-
-#include <psimd.h>
-
-#include <fp16/bitcasts.h>
-
-#include <xnnpack/requantization-stubs.h>
-
-
-void xnn_qs8_requantize_precise__psimd(
-    size_t n,
-    const int32_t* input,
-    float scale,
-    int8_t zero_point,
-    int8_t qmin,
-    int8_t qmax,
-    int8_t* output)
-{
-  assert(n % 16 == 0);
-  assert(scale < 1.0f);
-  assert(scale >= 0x1.0p-32f);
-
-  const uint32_t scale_bits = fp32_to_bits(scale);
-  const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
-  const uint32_t shift = 127 + 31 - (scale_bits >> 23);
-  assert(shift >= 32);
-  assert(shift < 64);
-  const uint64_t rounding = UINT64_C(1) << (shift - 1);
-
-  const psimd_u32 vmultiplier_lo = psimd_splat_u32(multiplier & UINT32_C(0x0000FFFF));
-  const psimd_u32 vmultiplier_hi = psimd_splat_u32(multiplier >> 16);
-  const psimd_s32 vzero_point = psimd_splat_s32((int32_t) zero_point);
-  const psimd_s32 vsmin = psimd_splat_s32((int32_t) qmin - (int32_t) zero_point);
-  const psimd_s32 vsmax = psimd_splat_s32((int32_t) qmax - (int32_t) zero_point);
-  const psimd_u32 vrounding_lo = psimd_splat_u32((uint32_t) rounding);
-  const psimd_u32 vrounding_hi = psimd_splat_u32((uint32_t) (rounding >> 32));
-  const psimd_u32 vshift = psimd_splat_u32(shift - 32);
-  for (; n != 0; n -= 16) {
-    const psimd_s32 x = psimd_load_s32(input);
-    const psimd_s32 y = psimd_load_s32(input + 4);
-    const psimd_s32 z = psimd_load_s32(input + 8);
-    const psimd_s32 w = psimd_load_s32(input + 12);
-    input += 16;
-
-    const psimd_s32 x_neg_mask = x >> psimd_splat_s32(31);
-    const psimd_s32 y_neg_mask = y >> psimd_splat_s32(31);
-    const psimd_s32 z_neg_mask = z >> psimd_splat_s32(31);
-    const psimd_s32 w_neg_mask = w >> psimd_splat_s32(31);
-
-    const psimd_u32 x_abs = (psimd_u32) ((x ^ x_neg_mask) - x_neg_mask);
-    const psimd_u32 y_abs = (psimd_u32) ((y ^ y_neg_mask) - y_neg_mask);
-    const psimd_u32 z_abs = (psimd_u32) ((z ^ z_neg_mask) - z_neg_mask);
-    const psimd_u32 w_abs = (psimd_u32) ((w ^ w_neg_mask) - w_neg_mask);
-
-    const psimd_u32 x_abs_lo = x_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
-    const psimd_u32 x_abs_hi = x_abs >> psimd_splat_u32(16);
-    const psimd_u32 y_abs_lo = y_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
-    const psimd_u32 y_abs_hi = y_abs >> psimd_splat_u32(16);
-    const psimd_u32 z_abs_lo = z_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
-    const psimd_u32 z_abs_hi = z_abs >> psimd_splat_u32(16);
-    const psimd_u32 w_abs_lo = w_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
-    const psimd_u32 w_abs_hi = w_abs >> psimd_splat_u32(16);
-
-    const psimd_u32 x_product_ll = x_abs_lo * vmultiplier_lo;
-    const psimd_u32 y_product_ll = y_abs_lo * vmultiplier_lo;
-    const psimd_u32 z_product_ll = z_abs_lo * vmultiplier_lo;
-    const psimd_u32 w_product_ll = w_abs_lo * vmultiplier_lo;
-
-    const psimd_u32 x_product_lh = x_abs_lo * vmultiplier_hi + (x_product_ll >> psimd_splat_u32(16));
-    const psimd_u32 y_product_lh = y_abs_lo * vmultiplier_hi + (y_product_ll >> psimd_splat_u32(16));
-    const psimd_u32 z_product_lh = z_abs_lo * vmultiplier_hi + (z_product_ll >> psimd_splat_u32(16));
-    const psimd_u32 w_product_lh = w_abs_lo * vmultiplier_hi + (w_product_ll >> psimd_splat_u32(16));
-
-    const psimd_u32 x_product_hl = x_abs_hi * vmultiplier_lo + (x_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 y_product_hl = y_abs_hi * vmultiplier_lo + (y_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 z_product_hl = z_abs_hi * vmultiplier_lo + (z_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 w_product_hl = w_abs_hi * vmultiplier_lo + (w_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-
-    const psimd_u32 x_product_lo =
-        (x_product_hl << psimd_splat_u32(16)) + (x_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 y_product_lo =
-        (y_product_hl << psimd_splat_u32(16)) + (y_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 z_product_lo =
-        (z_product_hl << psimd_splat_u32(16)) + (z_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 w_product_lo =
-        (w_product_hl << psimd_splat_u32(16)) + (w_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-
-    const psimd_u32 x_product_hi =
-        x_abs_hi * vmultiplier_hi + (x_product_lh >> psimd_splat_u32(16)) + (x_product_hl >> psimd_splat_u32(16));
-    const psimd_u32 y_product_hi =
-        y_abs_hi * vmultiplier_hi + (y_product_lh >> psimd_splat_u32(16)) + (y_product_hl >> psimd_splat_u32(16));
-    const psimd_u32 z_product_hi =
-        z_abs_hi * vmultiplier_hi + (z_product_lh >> psimd_splat_u32(16)) + (z_product_hl >> psimd_splat_u32(16));
-    const psimd_u32 w_product_hi =
-        w_abs_hi * vmultiplier_hi + (w_product_lh >> psimd_splat_u32(16)) + (w_product_hl >> psimd_splat_u32(16));
-
-    const psimd_u32 x_adjusted_product =
-        (x_product_hi + vrounding_hi) - (psimd_u32) ((psimd_s32) (x_product_lo & vrounding_lo) >> psimd_splat_s32(31));
-    const psimd_u32 y_adjusted_product =
-        (y_product_hi + vrounding_hi) - (psimd_u32) ((psimd_s32) (y_product_lo & vrounding_lo) >> psimd_splat_s32(31));
-    const psimd_u32 z_adjusted_product =
-        (z_product_hi + vrounding_hi) - (psimd_u32) ((psimd_s32) (z_product_lo & vrounding_lo) >> psimd_splat_s32(31));
-    const psimd_u32 w_adjusted_product =
-        (w_product_hi + vrounding_hi) - (psimd_u32) ((psimd_s32) (w_product_lo & vrounding_lo) >> psimd_splat_s32(31));
-
-    const psimd_u32 x_abs_scaled = x_adjusted_product >> vshift;
-    const psimd_u32 y_abs_scaled = y_adjusted_product >> vshift;
-    const psimd_u32 z_abs_scaled = z_adjusted_product >> vshift;
-    const psimd_u32 w_abs_scaled = w_adjusted_product >> vshift;
-
-    const psimd_s32 x_scaled = ((psimd_s32) x_abs_scaled ^ x_neg_mask) - x_neg_mask;
-    const psimd_s32 y_scaled = ((psimd_s32) y_abs_scaled ^ y_neg_mask) - y_neg_mask;
-    const psimd_s32 z_scaled = ((psimd_s32) z_abs_scaled ^ z_neg_mask) - z_neg_mask;
-    const psimd_s32 w_scaled = ((psimd_s32) w_abs_scaled ^ w_neg_mask) - w_neg_mask;
-
-    const psimd_s32 x_clamped = psimd_max_s32(psimd_min_s32(x_scaled, vsmax), vsmin) + vzero_point;
-    const psimd_s32 y_clamped = psimd_max_s32(psimd_min_s32(y_scaled, vsmax), vsmin) + vzero_point;
-    const psimd_s32 z_clamped = psimd_max_s32(psimd_min_s32(z_scaled, vsmax), vsmin) + vzero_point;
-    const psimd_s32 w_clamped = psimd_max_s32(psimd_min_s32(w_scaled, vsmax), vsmin) + vzero_point;
-
-    const psimd_s16 xy_clamped = psimd_concat_even_s16((psimd_s16) x_clamped, (psimd_s16) y_clamped);
-    const psimd_s16 zw_clamped = psimd_concat_even_s16((psimd_s16) z_clamped, (psimd_s16) w_clamped);
-
-    const psimd_s8 xyzw_clamped = psimd_concat_even_s8((psimd_s8) xy_clamped, (psimd_s8) zw_clamped);
-
-    psimd_store_s8(output, xyzw_clamped);
-    output += 16;
-  }
-}
diff --git a/src/qu8-requantization/fp32-psimd.c b/src/qu8-requantization/fp32-psimd.c
deleted file mode 100644
index 8f92680..0000000
--- a/src/qu8-requantization/fp32-psimd.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-// All rights reserved.
-//
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <stdint.h>
-#include <stddef.h>
-
-#include <psimd.h>
-
-#include <xnnpack/requantization-stubs.h>
-
-
-void xnn_qu8_requantize_fp32__psimd(
-    size_t n,
-    const int32_t* input,
-    float scale,
-    uint8_t zero_point,
-    uint8_t qmin,
-    uint8_t qmax,
-    uint8_t* output)
-{
-  assert(n % 16 == 0);
-  assert(scale < 1.0f);
-  assert(scale >= 0x1.0p-32f);
-
-  const psimd_f32 vscale = psimd_splat_f32(scale);
-  const psimd_f32 vfmin = psimd_splat_f32((float) ((int32_t)(uint32_t) qmin - (int32_t)(uint32_t) zero_point));
-  const psimd_f32 vfmax = psimd_splat_f32((float) ((int32_t)(uint32_t) qmax - (int32_t)(uint32_t) zero_point));
-  const psimd_f32 vfmagic = psimd_splat_f32(12582912.0f);
-  const psimd_s32 vimagic = psimd_splat_s32(INT32_C(0x4B400000) - (int32_t)(uint32_t) zero_point);
-  for (; n != 0; n -= 16) {
-    const psimd_s32 x = psimd_load_s32(input);
-    const psimd_s32 y = psimd_load_s32(input + 4);
-    const psimd_s32 z = psimd_load_s32(input + 8);
-    const psimd_s32 w = psimd_load_s32(input + 12);
-    input += 16;
-
-    // Convert int32_t input to FP32 and multiply by FP32 scale.
-    // Both operations involve roundings:
-    // - Large int32_t values can't be exactly represented as FP32. We expect that conversion instruction would
-    //   round it to nearest FP32 value with ties to even, but Clang documentation for __builtin_convertvector does
-    //   not guaratee that.
-    // - Product of two FP32 values is generally not exactly representation as an FP32 value, and will be rounded
-    //   to nearest FP32 value with ties to even.
-    const psimd_f32 x_scaled = psimd_cvt_s32_f32(x) * vscale;
-    const psimd_f32 y_scaled = psimd_cvt_s32_f32(y) * vscale;
-    const psimd_f32 z_scaled = psimd_cvt_s32_f32(z) * vscale;
-    const psimd_f32 w_scaled = psimd_cvt_s32_f32(w) * vscale;
-
-    // Clang/gcc vector extension does not provide an intrinsics for a floating-point to integer conversion
-    // operation with rounding-to-nearest-even. In lieu of such intrinsic, we use a magic trick of adding a large
-    // number (1.5 * 2**23) to scaled value to cause rounding to integer, and then substracing this magic number as
-    // integer. This trick works only in a limited range (absolute value of input must be less than 2**22), so
-    // generally we have to clamp input to this range before using the magic. However, clamping to any smaller range
-    // works just as well, and thus we clamp to [qmin - zero point, qmax - zero point] range so that after we add
-    // zero point to the result, it gets into target [qmin, qmax] range.
-    const psimd_f32 x_clamped = psimd_min_f32(psimd_max_f32(x_scaled, vfmin), vfmax);
-    const psimd_f32 y_clamped = psimd_min_f32(psimd_max_f32(y_scaled, vfmin), vfmax);
-    const psimd_f32 z_clamped = psimd_min_f32(psimd_max_f32(z_scaled, vfmin), vfmax);
-    const psimd_f32 w_clamped = psimd_min_f32(psimd_max_f32(w_scaled, vfmin), vfmax);
-
-    // Conversion to integer using the "magic trick". Rounding is performed in the output of addition operation,
-    // and result is rounded to nearest even integer with ties to even.
-    const psimd_s32 x_biased = (psimd_s32)(x_clamped + vfmagic) - vimagic;
-    const psimd_s32 y_biased = (psimd_s32)(y_clamped + vfmagic) - vimagic;
-    const psimd_s32 z_biased = (psimd_s32)(z_clamped + vfmagic) - vimagic;
-    const psimd_s32 w_biased = (psimd_s32)(w_clamped + vfmagic) - vimagic;
-
-    // Select low 8 bits of each 32-bit integer in the vectors for the output.
-    // Since result is already clamped to [qmin, qmax] subrange of [0, 255], saturation is not needed.
-    const psimd_u16 xy_packed = psimd_concat_even_u16((psimd_u16) x_biased, (psimd_u16) y_biased);
-    const psimd_u16 zw_packed = psimd_concat_even_u16((psimd_u16) z_biased, (psimd_u16) w_biased);
-
-    const psimd_u8 xyzw_packed = psimd_concat_even_u8((psimd_u8) xy_packed, (psimd_u8) zw_packed);
-
-    psimd_store_u8(output, xyzw_packed);
-    output += 16;
-  }
-}
diff --git a/src/qu8-requantization/precise-psimd.c b/src/qu8-requantization/precise-psimd.c
deleted file mode 100644
index ca9b5b6..0000000
--- a/src/qu8-requantization/precise-psimd.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-// All rights reserved.
-//
-// Copyright 2019 Google LLC
-//
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree.
-
-#include <assert.h>
-#include <stdint.h>
-#include <stddef.h>
-
-#include <psimd.h>
-
-#include <fp16/bitcasts.h>
-
-#include <xnnpack/requantization-stubs.h>
-
-
-void xnn_qu8_requantize_precise__psimd(
-    size_t n,
-    const int32_t* input,
-    float scale,
-    uint8_t zero_point,
-    uint8_t qmin,
-    uint8_t qmax,
-    uint8_t* output)
-{
-  assert(n % 16 == 0);
-  assert(scale < 1.0f);
-  assert(scale >= 0x1.0p-32f);
-
-  const uint32_t scale_bits = fp32_to_bits(scale);
-  const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
-  const uint32_t shift = 127 + 31 - (scale_bits >> 23);
-  assert(shift >= 32);
-  assert(shift < 64);
-  const uint64_t rounding = UINT64_C(1) << (shift - 1);
-
-  const psimd_u32 vmultiplier_lo = psimd_splat_u32(multiplier & UINT32_C(0x0000FFFF));
-  const psimd_u32 vmultiplier_hi = psimd_splat_u32(multiplier >> 16);
-  const psimd_s32 vzero_point = psimd_splat_s32((int32_t)(uint32_t) zero_point);
-  const psimd_s32 vsmin = psimd_splat_s32((int32_t)(uint32_t) qmin - (int32_t)(uint32_t) zero_point);
-  const psimd_s32 vsmax = psimd_splat_s32((int32_t)(uint32_t) qmax - (int32_t)(uint32_t) zero_point);
-  const psimd_u32 vrounding_lo = psimd_splat_u32((uint32_t) rounding);
-  const psimd_u32 vrounding_hi = psimd_splat_u32((uint32_t)(rounding >> 32));
-  const psimd_u32 vshift = psimd_splat_u32(shift - 32);
-  for (; n != 0; n -= 16) {
-    const psimd_s32 x = psimd_load_s32(input);
-    const psimd_s32 y = psimd_load_s32(input + 4);
-    const psimd_s32 z = psimd_load_s32(input + 8);
-    const psimd_s32 w = psimd_load_s32(input + 12);
-    input += 16;
-
-    const psimd_s32 x_neg_mask = x >> psimd_splat_s32(31);
-    const psimd_s32 y_neg_mask = y >> psimd_splat_s32(31);
-    const psimd_s32 z_neg_mask = z >> psimd_splat_s32(31);
-    const psimd_s32 w_neg_mask = w >> psimd_splat_s32(31);
-
-    const psimd_u32 x_abs = (psimd_u32) ((x ^ x_neg_mask) - x_neg_mask);
-    const psimd_u32 y_abs = (psimd_u32) ((y ^ y_neg_mask) - y_neg_mask);
-    const psimd_u32 z_abs = (psimd_u32) ((z ^ z_neg_mask) - z_neg_mask);
-    const psimd_u32 w_abs = (psimd_u32) ((w ^ w_neg_mask) - w_neg_mask);
-
-    const psimd_u32 x_abs_lo = x_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
-    const psimd_u32 x_abs_hi = x_abs >> psimd_splat_u32(16);
-    const psimd_u32 y_abs_lo = y_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
-    const psimd_u32 y_abs_hi = y_abs >> psimd_splat_u32(16);
-    const psimd_u32 z_abs_lo = z_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
-    const psimd_u32 z_abs_hi = z_abs >> psimd_splat_u32(16);
-    const psimd_u32 w_abs_lo = w_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
-    const psimd_u32 w_abs_hi = w_abs >> psimd_splat_u32(16);
-
-    const psimd_u32 x_product_ll = x_abs_lo * vmultiplier_lo;
-    const psimd_u32 y_product_ll = y_abs_lo * vmultiplier_lo;
-    const psimd_u32 z_product_ll = z_abs_lo * vmultiplier_lo;
-    const psimd_u32 w_product_ll = w_abs_lo * vmultiplier_lo;
-
-    const psimd_u32 x_product_lh = x_abs_lo * vmultiplier_hi + (x_product_ll >> psimd_splat_u32(16));
-    const psimd_u32 y_product_lh = y_abs_lo * vmultiplier_hi + (y_product_ll >> psimd_splat_u32(16));
-    const psimd_u32 z_product_lh = z_abs_lo * vmultiplier_hi + (z_product_ll >> psimd_splat_u32(16));
-    const psimd_u32 w_product_lh = w_abs_lo * vmultiplier_hi + (w_product_ll >> psimd_splat_u32(16));
-
-    const psimd_u32 x_product_hl = x_abs_hi * vmultiplier_lo + (x_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 y_product_hl = y_abs_hi * vmultiplier_lo + (y_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 z_product_hl = z_abs_hi * vmultiplier_lo + (z_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 w_product_hl = w_abs_hi * vmultiplier_lo + (w_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-
-    const psimd_u32 x_product_lo =
-        (x_product_hl << psimd_splat_u32(16)) + (x_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 y_product_lo =
-        (y_product_hl << psimd_splat_u32(16)) + (y_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 z_product_lo =
-        (z_product_hl << psimd_splat_u32(16)) + (z_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-    const psimd_u32 w_product_lo =
-        (w_product_hl << psimd_splat_u32(16)) + (w_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
-
-    const psimd_u32 x_product_hi =
-        x_abs_hi * vmultiplier_hi + (x_product_lh >> psimd_splat_u32(16)) + (x_product_hl >> psimd_splat_u32(16));
-    const psimd_u32 y_product_hi =
-        y_abs_hi * vmultiplier_hi + (y_product_lh >> psimd_splat_u32(16)) + (y_product_hl >> psimd_splat_u32(16));
-    const psimd_u32 z_product_hi =
-        z_abs_hi * vmultiplier_hi + (z_product_lh >> psimd_splat_u32(16)) + (z_product_hl >> psimd_splat_u32(16));
-    const psimd_u32 w_product_hi =
-        w_abs_hi * vmultiplier_hi + (w_product_lh >> psimd_splat_u32(16)) + (w_product_hl >> psimd_splat_u32(16));
-
-    const psimd_u32 x_adjusted_product =
-        (x_product_hi + vrounding_hi) - (psimd_u32) ((psimd_s32) (x_product_lo & vrounding_lo) >> psimd_splat_s32(31));
-    const psimd_u32 y_adjusted_product =
-        (y_product_hi + vrounding_hi) - (psimd_u32) ((psimd_s32) (y_product_lo & vrounding_lo) >> psimd_splat_s32(31));
-    const psimd_u32 z_adjusted_product =
-        (z_product_hi + vrounding_hi) - (psimd_u32) ((psimd_s32) (z_product_lo & vrounding_lo) >> psimd_splat_s32(31));
-    const psimd_u32 w_adjusted_product =
-        (w_product_hi + vrounding_hi) - (psimd_u32) ((psimd_s32) (w_product_lo & vrounding_lo) >> psimd_splat_s32(31));
-
-    const psimd_u32 x_abs_scaled = x_adjusted_product >> vshift;
-    const psimd_u32 y_abs_scaled = y_adjusted_product >> vshift;
-    const psimd_u32 z_abs_scaled = z_adjusted_product >> vshift;
-    const psimd_u32 w_abs_scaled = w_adjusted_product >> vshift;
-
-    const psimd_s32 x_scaled = ((psimd_s32) x_abs_scaled ^ x_neg_mask) - x_neg_mask;
-    const psimd_s32 y_scaled = ((psimd_s32) y_abs_scaled ^ y_neg_mask) - y_neg_mask;
-    const psimd_s32 z_scaled = ((psimd_s32) z_abs_scaled ^ z_neg_mask) - z_neg_mask;
-    const psimd_s32 w_scaled = ((psimd_s32) w_abs_scaled ^ w_neg_mask) - w_neg_mask;
-
-    const psimd_u32 x_clamped = (psimd_u32) (psimd_max_s32(psimd_min_s32(x_scaled, vsmax), vsmin) + vzero_point);
-    const psimd_u32 y_clamped = (psimd_u32) (psimd_max_s32(psimd_min_s32(y_scaled, vsmax), vsmin) + vzero_point);
-    const psimd_u32 z_clamped = (psimd_u32) (psimd_max_s32(psimd_min_s32(z_scaled, vsmax), vsmin) + vzero_point);
-    const psimd_u32 w_clamped = (psimd_u32) (psimd_max_s32(psimd_min_s32(w_scaled, vsmax), vsmin) + vzero_point);
-
-    const psimd_u16 xy_clamped = psimd_concat_even_u16((psimd_u16) x_clamped, (psimd_u16) y_clamped);
-    const psimd_u16 zw_clamped = psimd_concat_even_u16((psimd_u16) z_clamped, (psimd_u16) w_clamped);
-
-    const psimd_u8 xyzw_clamped = psimd_concat_even_u8((psimd_u8) xy_clamped, (psimd_u8) zw_clamped);
-
-    psimd_store_u8(output, xyzw_clamped);
-    output += 16;
-  }
-}
diff --git a/src/runtime.c b/src/runtime.c
index dce4f96..7ec2096 100644
--- a/src/runtime.c
+++ b/src/runtime.c
@@ -62,7 +62,7 @@
     goto error;
   }
 
-  xnn_subgraph_optimize(subgraph, 0 /* flags */);
+  xnn_subgraph_optimize(subgraph, flags & XNN_FLAG_SPARSE_INFERENCE);
 
   status = xnn_status_out_of_memory;
 
@@ -439,6 +439,21 @@
         runtime->opdata[i].inputs[1] = node->inputs[1];
         runtime->opdata[i].outputs[0] = node->outputs[0];
         break;
+      case xnn_node_type_elu:
+        status = xnn_create_elu_nc_f32(
+          values[node->inputs[0]].shape.dim[values[node->inputs[0]].shape.num_dims - 1] /* channels */,
+          values[node->inputs[0]].shape.dim[values[node->inputs[0]].shape.num_dims - 1] /* input stride */,
+          values[node->inputs[0]].shape.dim[values[node->inputs[0]].shape.num_dims - 1] /* output stride */,
+          node->params.elu.alpha,
+          node->flags,
+          &runtime->opdata[i].operator_object);
+        if (status != xnn_status_success) {
+          goto error;
+        }
+        runtime->opdata[i].batch_size = product_non_channel_dims(&values[node->inputs[0]].shape);
+        runtime->opdata[i].inputs[0] = node->inputs[0];
+        runtime->opdata[i].outputs[0] = node->outputs[0];
+        break;
       case xnn_node_type_fully_connected:
       {
         const size_t num_input_elements = product_all_dims(&values[node->inputs[0]].shape);
@@ -1110,6 +1125,16 @@
           runtime->blobs[opdata->outputs[0]].data,
           runtime->threadpool);
         break;
+      case xnn_operator_type_elu_nc_f32:
+        assert(runtime->blobs[opdata->inputs[0]].data != NULL);
+        assert(runtime->blobs[opdata->outputs[0]].data != NULL);
+        status = xnn_setup_elu_nc_f32(
+          opdata->operator_object,
+          opdata->batch_size,
+          runtime->blobs[opdata->inputs[0]].data,
+          runtime->blobs[opdata->outputs[0]].data,
+          runtime->threadpool);
+        break;
       case xnn_operator_type_fully_connected_nc_f32:
         assert(runtime->blobs[opdata->inputs[0]].data != NULL);
         assert(runtime->blobs[opdata->outputs[0]].data != NULL);
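
A minimal sketch of how a caller would opt into the sparse path that this change wires through, assuming xnn_create_runtime_v2 is the entry point whose flags word reaches xnn_subgraph_optimize above:

#include <xnnpack.h>

// Sparse execution is requested per runtime rather than globally: the flag is
// forwarded to xnn_subgraph_optimize() while the runtime is being created.
static enum xnn_status create_sparse_runtime(xnn_subgraph_t subgraph, xnn_runtime_t* runtime_out) {
  return xnn_create_runtime_v2(subgraph, NULL /* threadpool */, XNN_FLAG_SPARSE_INFERENCE, runtime_out);
}
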
diff --git a/src/subgraph-strings.c b/src/subgraph-strings.c
index 0b8d2e9..c322ab6 100644
--- a/src/subgraph-strings.c
+++ b/src/subgraph-strings.c
@@ -42,6 +42,8 @@
       return "Depth To Space";
     case xnn_node_type_divide:
       return "Divide";
+    case xnn_node_type_elu:
+      return "ELU";
     case xnn_node_type_fully_connected:
       return "Fully Connected";
     case xnn_node_type_floor:
diff --git a/src/subgraph.c b/src/subgraph.c
index 1d97284..68070eb 100644
--- a/src/subgraph.c
+++ b/src/subgraph.c
@@ -206,8 +206,9 @@
         default:
           return 0;
       }
-    case xnn_node_type_global_average_pooling_2d:
     case xnn_node_type_depth_to_space:
+      return XNN_LAYOUT_FLAG_COMPATIBLE_NCHW2NHWC;
+    case xnn_node_type_global_average_pooling_2d:
       return XNN_LAYOUT_FLAG_COMPATIBLE_NCHW | XNN_LAYOUT_FLAG_COMPATIBLE_NCHW2NHWC;
     case xnn_node_type_add2:
     case xnn_node_type_multiply2:
@@ -253,6 +254,7 @@
     case xnn_node_type_bankers_rounding:
     case xnn_node_type_ceiling:
     case xnn_node_type_clamp:
+    case xnn_node_type_elu:
     case xnn_node_type_floor:
     case xnn_node_type_hardswish:
     case xnn_node_type_leaky_relu:
@@ -420,6 +422,37 @@
       }
     }
   }
+  // Evaluate whether it is profitable to run the model as sparse:
+  // - Compute the number of parameters and zeroes in 1x1 Convolution weights
+  // - Disable sparse rewriting for clusters without 1x1 Convolutions (num_params == 0)
+  //   or with 2/3 or fewer zero weights in the 1x1 Convolution filters
+  for (uint32_t n = 0; n < subgraph->num_nodes; n++) {
+    struct xnn_node* node = &subgraph->nodes[n];
+    if ((subgraph->nodes[node->cluster_leader].layout_flags & XNN_LAYOUT_FLAG_INCOMPATIBLE_CLUSTER) != 0) {
+      continue;
+    }
+
+    if (node->type == xnn_node_type_convolution_2d &&
+        max(node->params.convolution_2d.kernel_height, node->params.convolution_2d.kernel_width) == 1)
+    {
+      assert(node->num_inputs >= 2);
+
+      const struct xnn_value* filter = &subgraph->values[node->inputs[1]];
+      assert(filter->data != NULL);
+      assert(filter->shape.num_dims == 4);
+
+      const size_t num_params = filter->shape.dim[0] * filter->shape.dim[3];
+      subgraph->nodes[node->cluster_leader].num_params += num_params;
+
+      const float* data = (const float*) filter->data;
+      size_t num_zeroes = 0;
+      for (size_t i = 0; i < num_params; i++) {
+        num_zeroes += (size_t) (data[i] == 0.0f);
+      }
+      xnn_log_debug("1x1 Convolution 2D Node #%" PRIu32 ": %zu / %zu sparsity", n, num_zeroes, num_params);
+      subgraph->nodes[node->cluster_leader].num_zeroes += num_zeroes;
+    }
+  }
   for (uint32_t n = 0; n < subgraph->num_nodes; n++) {
     struct xnn_node* node = &subgraph->nodes[n];
     if ((subgraph->nodes[node->cluster_leader].layout_flags & XNN_LAYOUT_FLAG_INCOMPATIBLE_CLUSTER) != 0) {
@@ -430,6 +463,12 @@
       continue;
     }
 
+    if (subgraph->nodes[node->cluster_leader].num_zeroes * 3 <= subgraph->nodes[node->cluster_leader].num_params * 2) {
+      xnn_log_info("Node #%" PRIu32 ": sparse inference disabled: 1x1 Convolutions contain %zu / %zu zero weights",
+        n, subgraph->nodes[node->cluster_leader].num_zeroes, subgraph->nodes[node->cluster_leader].num_params);
+      continue;
+    }
+
     for (uint32_t i = 0; i < node->num_inputs; i++) {
       struct xnn_value* value = &subgraph->values[node->inputs[i]];
       if (value->data != NULL) {
@@ -624,7 +663,9 @@
   }
 
   #if XNN_ENABLE_SPARSE
-    xnn_subgraph_rewrite_for_nchw(subgraph);
+    if ((flags & XNN_FLAG_SPARSE_INFERENCE) && (xnn_params.init_flags & XNN_INIT_FLAG_CHW_OPT)) {
+      xnn_subgraph_rewrite_for_nchw(subgraph);
+    }
   #endif
 
   return xnn_status_success;
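
A standalone restatement of the profitability rule added above (a sketch assuming a dense O x 1 x 1 x I filter layout, not the library's internal helper):

#include <stdbool.h>
#include <stddef.h>

// Sparse (NCHW) rewriting is kept only if strictly more than 2/3 of the 1x1
// convolution weights in the cluster are zero; this mirrors the integer
// comparison num_zeroes * 3 <= num_params * 2 used above to disable it.
static bool is_sparse_profitable(const float* filter, size_t output_channels, size_t input_channels) {
  const size_t num_params = output_channels * input_channels;
  size_t num_zeroes = 0;
  for (size_t i = 0; i < num_params; i++) {
    num_zeroes += (size_t) (filter[i] == 0.0f);
  }
  return num_zeroes * 3 > num_params * 2;
}
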
diff --git a/src/subgraph/elu.c b/src/subgraph/elu.c
new file mode 100644
index 0000000..0c1667f
--- /dev/null
+++ b/src/subgraph/elu.c
@@ -0,0 +1,64 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <xnnpack.h>
+#include <xnnpack/log.h>
+#include <xnnpack/params.h>
+#include <xnnpack/subgraph.h>
+
+
+enum xnn_status xnn_define_elu(
+  xnn_subgraph_t subgraph,
+  float alpha,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags)
+{
+  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_elu));
+    return xnn_status_uninitialized;
+  }
+
+  if (alpha <= 0.0f || !isnormal(alpha)) {
+    xnn_log_error(
+      "failed to define %s operator with %.7g alpha parameter: alpha must be finite, normalized, and positive",
+      xnn_node_type_to_string(xnn_node_type_elu), alpha);
+    return xnn_status_invalid_parameter;
+  }
+
+  if (input_id >= subgraph->num_values) {
+    xnn_log_error(
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_elu), input_id);
+    return xnn_status_invalid_parameter;
+  }
+
+  if (output_id >= subgraph->num_values) {
+    xnn_log_error(
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_elu), output_id);
+    return xnn_status_invalid_parameter;
+  }
+
+  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
+  if (node == NULL) {
+    return xnn_status_out_of_memory;
+  }
+
+  node->type = xnn_node_type_elu;
+  node->params.elu.alpha = alpha;
+  node->num_inputs = 1;
+  node->inputs[0] = input_id;
+  node->num_outputs = 1;
+  node->outputs[0] = output_id;
+  node->flags = flags;
+
+  return xnn_status_success;
+}
diff --git a/src/xnnpack/AlignedAllocator.h b/src/xnnpack/AlignedAllocator.h
index bb724ea..842c1dd 100644
--- a/src/xnnpack/AlignedAllocator.h
+++ b/src/xnnpack/AlignedAllocator.h
@@ -6,6 +6,7 @@
 
 #include <cstddef>
 #include <limits>
+#include <memory>
 #include <type_traits>
 #include <utility>
 
diff --git a/src/xnnpack/common.h b/src/xnnpack/common.h
index 81db1e8..221f7b0 100644
--- a/src/xnnpack/common.h
+++ b/src/xnnpack/common.h
@@ -73,6 +73,12 @@
   #define XNN_PLATFORM_IOS 0
 #endif
 
+#if defined(__APPLE__) && TARGET_OS_MAC
+  #define XNN_PLATFORM_MAC 1
+#else
+  #define XNN_PLATFORM_MAC 0
+#endif
+
 #if XNN_PLATFORM_ANDROID || XNN_PLATFORM_IOS
   #define XNN_PLATFORM_MOBILE 1
 #else
diff --git a/src/xnnpack/compute.h b/src/xnnpack/compute.h
index bf8f44f..9f198e7 100644
--- a/src/xnnpack/compute.h
+++ b/src/xnnpack/compute.h
@@ -26,6 +26,7 @@
   xnn_parallelization_type_3d_tile_2d,
   xnn_parallelization_type_4d,
   xnn_parallelization_type_4d_tile_2d,
+  xnn_parallelization_type_5d,
   xnn_parallelization_type_5d_tile_2d,
   xnn_parallelization_type_6d_tile_2d,
 #if XNN_MAX_UARCH_TYPES > 1
@@ -47,6 +48,7 @@
     pthreadpool_task_3d_tile_2d_t task_3d_tile_2d;
     pthreadpool_task_4d_t task_4d;
     pthreadpool_task_4d_tile_2d_t task_4d_tile_2d;
+    pthreadpool_task_5d_t task_5d;
     pthreadpool_task_5d_tile_2d_t task_5d_tile_2d;
     pthreadpool_task_6d_tile_2d_t task_6d_tile_2d;
 #if XNN_MAX_UARCH_TYPES > 1
@@ -779,7 +781,7 @@
 #ifndef __cplusplus
   XNN_PRIVATE void xnn_compute_elementwise_binary_5d(
       const struct elementwise_binary_context context[restrict XNN_MIN_ELEMENTS(1)],
-      size_t i, size_t j, size_t k, size_t l, size_t m, size_t l_range, size_t m_range);
+      size_t i, size_t j, size_t k, size_t l, size_t m);
 #endif
 
 struct channel_shuffle_context {
@@ -933,7 +935,7 @@
 #ifndef __cplusplus
   XNN_PRIVATE void xnn_compute_pad_5d(
       const struct pad_context context[restrict XNN_MIN_ELEMENTS(1)],
-      size_t i, size_t j, size_t k, size_t l, size_t m, size_t l_range, size_t m_range);
+      size_t i, size_t j, size_t k, size_t l, size_t m);
 #endif
 
 struct u8_softmax_context {
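
The new non-tiled 5D parallelization type invokes its callback once per element of the 5D range; a sketch of the assumed callback shape (the replaced tiled variant additionally received l_range and m_range arguments):

#include <stddef.h>

struct example_5d_context {
  size_t range[5];  // compute.range[0..4], outermost to innermost
  float* output;    // hypothetical payload; real operators carry strides and pointers
};

// Matches the pthreadpool_task_5d_t shape once cast: one call per (i, j, k, l, m),
// with no inner tile loop.
static void example_task_5d(struct example_5d_context* context,
                            size_t i, size_t j, size_t k, size_t l, size_t m) {
  const size_t index =
      (((i * context->range[1] + j) * context->range[2] + k) * context->range[3] + l) * context->range[4] + m;
  context->output[index] = 0.0f;  // placeholder per-element work
}

Operator setup then fills compute.type, compute.task_5d, and compute.range[0..4], as shown for the binary elementwise and constant pad operators above.
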
diff --git a/src/xnnpack/conv.h b/src/xnnpack/conv.h
index c434398..7c4d150 100644
--- a/src/xnnpack/conv.h
+++ b/src/xnnpack/conv.h
@@ -72,6 +72,7 @@
       size_t output_channel_stride,                          \
       const union xnn_f32_minmax_params* params);
 
+DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2)
 DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neonfma_2x2)
 DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__scalar_1x1)
 DECLARE_F32_CONV_HWC2CHW_UKERNEL_FUNCTION(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__sse_1x1)
diff --git a/src/xnnpack/dwconv.h b/src/xnnpack/dwconv.h
index c341401..e06b798 100644
--- a/src/xnnpack/dwconv.h
+++ b/src/xnnpack/dwconv.h
@@ -126,37 +126,37 @@
 DECLARE_F32_DWCONV_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_ukernel_up8x4__wasmsimd)
 
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2)
 
 DECLARE_F32_DWCONV_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_ukernel_up4x9__wasmsimd)
 DECLARE_F32_DWCONV_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_ukernel_up8x9__wasmsimd)
 
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2)
 
 DECLARE_F32_DWCONV_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_ukernel_up4x25__wasmsimd)
 DECLARE_F32_DWCONV_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_ukernel_up8x25__wasmsimd)
 
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2)
 DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86)
-DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86)
+DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2)
 
 DECLARE_F32_DWCONV_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_ukernel_up1x4__wasm)
 DECLARE_F32_DWCONV_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_ukernel_up1x4__wasm_acc2)
@@ -354,28 +354,46 @@
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4_acc3)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4_acc4)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_1x1)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_2x1)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_3x1)
@@ -425,8 +443,38 @@
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc3)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_1x4_acc4)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_2x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_2x1)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_3x1)
@@ -470,8 +518,58 @@
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_2x4_acc3)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_3x4_acc2)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_4x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_2x1)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_3x1)
@@ -516,8 +614,46 @@
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neonfma_2x4_acc2)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neonfma_2x4_acc3)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neonfma_3x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2)
-DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3)
+DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_2x1)
 DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_3x1)
diff --git a/src/xnnpack/gavgpool.h b/src/xnnpack/gavgpool.h
index 52de9fb..67ef721 100644
--- a/src/xnnpack/gavgpool.h
+++ b/src/xnnpack/gavgpool.h
@@ -188,7 +188,6 @@
 
 DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__neon_x4)
 DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__sse_x4)
-DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__psimd_x4)
 DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4)
 DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4)
 DECLARE_F32_GAVGPOOL_CW_UKERNEL_FUNCTION(xnn_f32_gavgpool_cw_ukernel__scalar_x1)
diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h
index 2564260..0eaa05a 100644
--- a/src/xnnpack/gemm.h
+++ b/src/xnnpack/gemm.h
@@ -129,16 +129,27 @@
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__aarch64_neonfma_ld64)
 
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__sse_load1)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1)
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__sse_load1)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1)
 
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__sse_dup)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup)
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__sse_dup)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup)
 
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8s4__sse)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8s4__sse)
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8s4__sse)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8s4__sse)
 
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x2c4__sse)
 
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup)
+
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__avx_broadcast)
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__avx_broadcast)
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__avx_broadcast)
@@ -172,17 +183,17 @@
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast)
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast)
 
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat)
 
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat)
 
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat)
 DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat)
@@ -192,17 +203,17 @@
 DECLARE_F32_GEMM_RELU_UKERNEL_FUNCTION(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat)
 DECLARE_F32_GEMM_RELU_UKERNEL_FUNCTION(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat)
 
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat)
 
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86)
-DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat)
+DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat)
 
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_1x8s4__wasmsimd_arm)
 DECLARE_F32_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemm_minmax_ukernel_3x8s4__wasmsimd_arm)
@@ -327,13 +338,24 @@
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__aarch64_neonfma_ld64)
 
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__sse_load1)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1)
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__sse_load1)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1)
 
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__sse_dup)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup)
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__sse_dup)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup)
+
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup)
 
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8s4__sse)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse)
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8s4__sse)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse)
 
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__avx_broadcast)
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__avx_broadcast)
@@ -368,17 +390,17 @@
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast)
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast)
 
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat)
 
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat)
 
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8s4__wasmsimd_arm)
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8s4__wasmsimd_arm)
@@ -386,29 +408,29 @@
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmsimd_arm)
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8s4__wasmsimd_arm)
 
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat)
 
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat)
 
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat)
 
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86)
-DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat)
+DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat)
 
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_1x8s4__wasmsimd_arm)
 DECLARE_F32_GEMMINC_MINMAX_UKERNEL_FUNCTION(xnn_f32_gemminc_minmax_ukernel_3x8s4__wasmsimd_arm)
diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h
index 64f72e3..7e09667 100644
--- a/src/xnnpack/igemm.h
+++ b/src/xnnpack/igemm.h
@@ -128,16 +128,27 @@
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__aarch32_neon_cortex_a55)
 
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__sse_load1)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1)
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__sse_load1)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1)
 
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__sse_dup)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup)
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__sse_dup)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup)
 
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8s4__sse)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8s4__sse)
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8s4__sse)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8s4__sse)
 
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x2c4__sse)
 
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup)
+
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__avx_broadcast)
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__avx_broadcast)
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__avx_broadcast)
@@ -179,29 +190,29 @@
 DECLARE_F32_IGEMM_RELU_UKERNEL_FUNCTION(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat)
 DECLARE_F32_IGEMM_RELU_UKERNEL_FUNCTION(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat)
 
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat)
 
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat)
 
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat)
 
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86)
-DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat)
+DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat)
 
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_1x8s4__wasmsimd_arm)
 DECLARE_F32_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_igemm_minmax_ukernel_3x8s4__wasmsimd_arm)
diff --git a/src/xnnpack/isa-checks.h b/src/xnnpack/isa-checks.h
index ab7f0ed..5846e9f 100644
--- a/src/xnnpack/isa-checks.h
+++ b/src/xnnpack/isa-checks.h
@@ -13,17 +13,6 @@
 #include <xnnpack/common.h>
 
 
-#if XNN_ARCH_WASMSIMD
-  #define TEST_REQUIRES_PSIMD
-#else
-  #define TEST_REQUIRES_PSIMD \
-    do { \
-      if (!cpuinfo_initialize() || !(cpuinfo_has_arm_neon() || cpuinfo_has_x86_sse2())) { \
-        GTEST_SKIP(); \
-      } \
-    } while (0)
-#endif
-
 #define TEST_REQUIRES_X86_SSE \
   do { \
     if (!cpuinfo_initialize() || !cpuinfo_has_x86_sse()) { \
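
The isa-checks.h hunk above drops the obsolete `TEST_REQUIRES_PSIMD` guard; the remaining `TEST_REQUIRES_*` macros all follow the same pattern, probing the host CPU through cpuinfo and skipping the test when the required ISA is absent. A minimal sketch of that probe, outside the GTest wrapper; only `cpuinfo_initialize()` and `cpuinfo_has_x86_sse()` are taken from the macro shown above:

```c
// Hedged sketch: the runtime probe behind TEST_REQUIRES_X86_SSE, without the
// GTEST_SKIP() wrapper used in the actual unit tests.
#include <stdbool.h>

#include <cpuinfo.h>

static bool host_supports_x86_sse(void) {
  // A false return from cpuinfo_initialize() means the CPU could not be
  // identified, so the requirement cannot be verified and the test is skipped.
  return cpuinfo_initialize() && cpuinfo_has_x86_sse();
}
```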
diff --git a/src/xnnpack/operator.h b/src/xnnpack/operator.h
index f5b59d2..765bc8c 100644
--- a/src/xnnpack/operator.h
+++ b/src/xnnpack/operator.h
@@ -57,6 +57,7 @@
   xnn_operator_type_depth_to_space_nchw2nhwc_x32,
   xnn_operator_type_depth_to_space_nhwc_x32,
   xnn_operator_type_divide_nd_f32,
+  xnn_operator_type_elu_nc_f32,
   xnn_operator_type_fully_connected_nc_f32,
   xnn_operator_type_fully_connected_nc_qu8,
   xnn_operator_type_floor_nc_f32,
@@ -114,7 +115,6 @@
   union {
     xnn_dwconv2d_chw_ukernel_function chw_function;
   };
-  uint8_t input_width_tile;
   uint8_t output_width_tile;
 };
 
@@ -258,6 +258,7 @@
 
   union {
     union xnn_f32_abs_params f32_abs;
+    union xnn_f32_elu_params f32_elu;
     union xnn_f32_lrelu_params f32_lrelu;
     union xnn_f32_neg_params f32_neg;
     union xnn_f32_rnd_params f32_rnd;
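
The operator.h changes above give ELU its own `xnn_operator_type_elu_nc_f32` identifier and a `f32_elu` slot in the operator's packed-parameters union. A minimal sketch of how operator-setup code could fill that slot, assuming (not shown in this hunk) that the union is the `params` member of `struct xnn_operator` and that prescale and beta default to 1.0f for a plain ELU:

```c
// Hedged sketch only: `struct xnn_operator` and its `params` member are
// assumptions about the surrounding struct, which this hunk does not show.
#include <xnnpack/operator.h>
#include <xnnpack/params-init.h>

static void store_elu_params(struct xnn_operator* elu_op, float alpha) {
  // prescale = beta = 1.0f yields the standard ELU curve; only alpha varies here.
  elu_op->params.f32_elu = xnn_init_f32_elu_params(1.0f, alpha, 1.0f);
}
```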
diff --git a/src/xnnpack/params-init.h b/src/xnnpack/params-init.h
index 8fce26f..e34bcdc 100644
--- a/src/xnnpack/params-init.h
+++ b/src/xnnpack/params-init.h
@@ -901,6 +901,32 @@
   return params;
 }
 
+static inline union xnn_f32_elu_params xnn_init_f32_elu_params(float prescale, float alpha, float beta)
+{
+  union xnn_f32_elu_params params;
+  #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+    for (uint32_t i = 0; i < 4; i++) {
+      params.sse.prescale[i] = prescale;
+      params.sse.alpha[i] = alpha;
+      params.sse.beta[i] = beta;
+    }
+  #else
+    params.scalar.prescale = prescale;
+    params.scalar.alpha = alpha;
+    params.scalar.beta = beta;
+  #endif
+  return params;
+}
+
+static inline union xnn_f32_elu_params xnn_init_scalar_f32_elu_params(float prescale, float alpha, float beta)
+{
+  union xnn_f32_elu_params params;
+  params.scalar.prescale = prescale;
+  params.scalar.alpha = alpha;
+  params.scalar.beta = beta;
+  return params;
+}
+
 static inline union xnn_f32_lrelu_params xnn_init_f32_lrelu_params(float slope)
 {
   union xnn_f32_lrelu_params params;
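
The two initializers added above differ only in which variant of `union xnn_f32_elu_params` they fill: the arch-aware one replicates each value into a 16-byte-aligned group of four lanes on x86, while the `_scalar_` one always writes the plain scalar variant. A small sketch of the observable difference, assuming an x86/x86-64 build:

```c
// Hedged sketch, assuming XNN_ARCH_X86 or XNN_ARCH_X86_64 is defined so the
// `sse` variant of union xnn_f32_elu_params (see params.h below) exists.
#include <assert.h>

#include <xnnpack/params.h>
#include <xnnpack/params-init.h>

static void compare_elu_param_initializers(void) {
  // Arch-aware: each scalar is broadcast into four aligned lanes for SSE loads.
  const union xnn_f32_elu_params simd = xnn_init_f32_elu_params(1.0f, 0.5f, 1.0f);
  assert(simd.sse.alpha[0] == 0.5f && simd.sse.alpha[3] == 0.5f);

  // Scalar-only: always fills the `scalar` variant, as used by scalar kernels.
  const union xnn_f32_elu_params plain = xnn_init_scalar_f32_elu_params(1.0f, 0.5f, 1.0f);
  assert(plain.scalar.alpha == 0.5f);
}
```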
diff --git a/src/xnnpack/params.h b/src/xnnpack/params.h
index 1cd5f53..a9e3c66 100644
--- a/src/xnnpack/params.h
+++ b/src/xnnpack/params.h
@@ -98,6 +98,21 @@
 #endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
 };
 
+union xnn_f32_elu_params {
+  struct {
+    float prescale;
+    float alpha;
+    float beta;
+  } scalar;
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  struct {
+    XNN_ALIGN(16) float prescale[4];
+    XNN_ALIGN(16) float alpha[4];
+    XNN_ALIGN(16) float beta[4];
+  } sse;
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+};
+
 union xnn_f32_lrelu_params {
   struct {
     float slope;
@@ -1564,6 +1579,12 @@
     int8_t* output,
     const union xnn_qs8_add_params* params);
 
+typedef void (*xnn_f32_velu_ukernel_function)(
+    size_t n,
+    const float* x,
+    float* y,
+    const union xnn_f32_elu_params* params);
+
 typedef void (*xnn_f32_vsqrt_ukernel_function)(
     size_t n,
     const float* x,
@@ -1827,8 +1848,6 @@
 
 struct dwconv2d_chw_parameters {
   xnn_dwconv2d_chw_ukernel_function ukernel;
-  // Number of input width pixels in a tile.
-  uint8_t input_width_tile;
   // Number of output width pixels in a tile.
   uint8_t output_width_tile;
   // Number of output height pixels in a tile.
@@ -1981,6 +2000,8 @@
 #define XNN_INIT_FLAG_X8      0x00000100
 // Indicates that XX XNNPACK microkernels are available for use.
 #define XNN_INIT_FLAG_XX      0x00000200
+// Indicates that CHW XNNPACK microkernels are optimized for the host platform.
+#define XNN_INIT_FLAG_CHW_OPT 0x00000400
 
 struct xnn_parameters {
   // Bitwise combination of XNN_INIT_FLAG_* flags
@@ -2035,6 +2056,7 @@
     struct ibilinear_parameters ibilinear;
     xnn_univector_ukernel_function abs;
     xnn_univector_ukernel_function clamp;
+    xnn_univector_ukernel_function elu;
     xnn_univector_ukernel_function hswish;
     xnn_univector_ukernel_function lrelu;
     xnn_univector_ukernel_function neg;
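
Putting the params.h pieces together: `union xnn_f32_elu_params` feeds the new `xnn_f32_velu_ukernel_function` signature, and the `elu` entry added to `struct xnn_parameters` is where the selected microkernel is normally dispatched from. Below is a direct-call sketch against one of the ELU kernels declared in the vunary.h hunk further down; it assumes `n` is the batch size in bytes, which this header does not spell out:

```c
// Hedged sketch: calls one SSE2 ELU microkernel (declared in vunary.h below)
// by name; production code goes through the `elu` univector dispatch entry
// instead. Treating `n` as a byte count is an assumption here.
#include <stddef.h>

#include <xnnpack/params.h>
#include <xnnpack/params-init.h>
#include <xnnpack/vunary.h>

static void apply_elu(const float* input, float* output, size_t batch_size) {
  const union xnn_f32_elu_params params =
      xnn_init_f32_elu_params(1.0f /* prescale */, 1.0f /* alpha */, 1.0f /* beta */);
  xnn_f32_velu_ukernel__sse2_rr2_p6_x8(batch_size * sizeof(float), input, output, &params);
}
```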
diff --git a/src/xnnpack/ppmm.h b/src/xnnpack/ppmm.h
index be539a4..cec1487 100644
--- a/src/xnnpack/ppmm.h
+++ b/src/xnnpack/ppmm.h
@@ -36,8 +36,8 @@
 
 DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__sse)
 
-DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm)
-DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86)
+DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat)
+DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat)
 
 DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_2x4__scalar)
 DECLARE_F32_PPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_ppmm_minmax_ukernel_3x3__scalar)
diff --git a/src/xnnpack/requantization-stubs.h b/src/xnnpack/requantization-stubs.h
index 78c8955..76dadaf 100644
--- a/src/xnnpack/requantization-stubs.h
+++ b/src/xnnpack/requantization-stubs.h
@@ -44,13 +44,11 @@
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_precise__ssse3)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_precise__sse4)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_precise__neon)
-DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_precise__psimd)
 
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__scalar_lrintf)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__scalar_magic)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__sse2)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__neon)
-DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__psimd)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_fp32__wasmsimd)
 
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_q31__scalar)
@@ -58,7 +56,6 @@
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_q31__ssse3)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_q31__sse4)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_q31__neon)
-DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_q31__psimd)
 DECLARE_QU8_REQUANTIZATION_FUNCTION(xnn_qu8_requantize_q31__wasmsimd)
 
 
@@ -88,14 +85,12 @@
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_precise__ssse3)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_precise__sse4)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_precise__neon)
-DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_precise__psimd)
 
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__scalar_lrintf)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__scalar_magic)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__sse2)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__sse4)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__neon)
-DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__psimd)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_fp32__wasmsimd)
 
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_q31__scalar)
@@ -103,7 +98,6 @@
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_q31__ssse3)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_q31__sse4)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_q31__neon)
-DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_q31__psimd)
 DECLARE_QS8_REQUANTIZATION_FUNCTION(xnn_qs8_requantize_q31__wasmsimd)
 
 
diff --git a/src/xnnpack/spmm.h b/src/xnnpack/spmm.h
index 416c9af..95fc340 100644
--- a/src/xnnpack/spmm.h
+++ b/src/xnnpack/spmm.h
@@ -32,6 +32,9 @@
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_1x1__scalar_pipelined)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_2x1__scalar)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_2x1__scalar_pipelined)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neon)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neonfma_pipelined)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__neonfma_x2)
@@ -50,6 +53,9 @@
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_x4)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x2__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_4x4__neonfma)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neon)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neonfma_pipelined)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x1__neonfma_x2)
@@ -70,9 +76,13 @@
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x2__scalar)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x4__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_8x4__scalar)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_12x1__neon)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_12x1__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_12x2__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_12x4__neonfma)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neon)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neonfma_pipelined)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__neonfma_x2)
@@ -89,6 +99,9 @@
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_x4)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x2__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_16x4__neonfma)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neon)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined)
+DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neonfma)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neonfma_pipelined)
 DECLARE_F32_SPMM_MINMAX_UKERNEL_FUNCTION(xnn_f32_spmm_minmax_ukernel_32x1__neonfma_x2)
diff --git a/src/xnnpack/subgraph.h b/src/xnnpack/subgraph.h
index 185efb1..c76d361 100644
--- a/src/xnnpack/subgraph.h
+++ b/src/xnnpack/subgraph.h
@@ -91,6 +91,7 @@
   xnn_node_type_depthwise_convolution_2d,
   xnn_node_type_depth_to_space,
   xnn_node_type_divide,
+  xnn_node_type_elu,
   xnn_node_type_fully_connected,
   xnn_node_type_floor,
   xnn_node_type_global_average_pooling_2d,
@@ -181,6 +182,9 @@
       uint32_t dilation_width;
     } pooling_2d;
     struct {
+      float alpha;
+    } elu;
+    struct {
       float negative_slope;
     } leaky_relu;
     struct {
@@ -209,6 +213,12 @@
   uint32_t flags;
   uint32_t layout_flags;
   uint32_t cluster_leader;
+  // Number of filter parameters in all 1x1 Convolutions of the sparse cluster.
+  // This value is properly initialized only in sparse inference analysis of 1x1 Convolutions.
+  size_t num_params;
+  // Number of zero filter parameters in all 1x1 Convolutions of the sparse cluster.
+  // This value is properly initialized only in sparse inference analysis of 1x1 Convolutions.
+  size_t num_zeroes;
 };
 
 struct xnn_operator_data {
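
The `num_params`/`num_zeroes` counters added above summarize, per sparse cluster, how many 1x1-Convolution filter weights exist and how many of them are zero. A hedged sketch of the kind of density check that sparse-inference analysis could derive from them; the 80% threshold below is illustrative and not taken from this change:

```c
#include <stdbool.h>
#include <stddef.h>

// Hedged sketch: only the two counters come from the subgraph.h change above;
// the helper and its threshold are illustrative.
static bool cluster_sparse_enough(size_t num_params, size_t num_zeroes) {
  if (num_params == 0) {
    return false;  // counters are only set by 1x1 Convolution sparse analysis
  }
  // Require at least 80% zero weights before routing the cluster through the
  // sparse (SpMM-based) inference path.
  return num_zeroes * 5 >= num_params * 4;
}
```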
diff --git a/src/xnnpack/vunary.h b/src/xnnpack/vunary.h
index b4442e2..2daf409 100644
--- a/src/xnnpack/vunary.h
+++ b/src/xnnpack/vunary.h
@@ -405,6 +405,209 @@
 DECLARE_F32_VABS_UKERNEL_FUNCTION(xnn_f32_vabs_ukernel__scalar_x4)
 
 
+#define DECLARE_F32_VELU_UKERNEL_FUNCTION(fn_name) \
+  XNN_INTERNAL void fn_name(                       \
+      size_t n,                                    \
+      const float* x,                              \
+      float* y,                                    \
+      const union xnn_f32_elu_params* params);
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_p6_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_p6_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_p6_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_p6_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neon_rr2_p6_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_p6_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_p6_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_p6_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_p6_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse2_rr2_p6_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_p6_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_p6_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_p6_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_p6_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__sse41_rr2_p6_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_p6_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_p6_x24)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_p6_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_p6_x40)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx_rr2_p6_x48)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x24)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x40)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x48)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x56)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x64)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x72)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx2_rr1_p6_x80)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_p6_x1)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_p6_x2)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_p6_x3)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_p6_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_p6_x5)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__wasm_rr2_p6_x6)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6)
+
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_p6_x1)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_p6_x2)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_p6_x3)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_p6_x4)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_p6_x5)
+DECLARE_F32_VELU_UKERNEL_FUNCTION(xnn_f32_velu_ukernel__scalar_rr2_p6_x6)
+
+
 #define DECLARE_F32_VLRELU_UKERNEL_FUNCTION(fn_name) \
   XNN_INTERNAL void fn_name(                         \
       size_t n,                                      \
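
For orientation on the new f32 VELU micro-kernel names above: the suffixes appear to encode the implementation strategy (rr1/rr2 for the number of range-reduction steps in the exp() evaluation, lut4/lut8/lut16 plus pN for a lookup table combined with a degree-N polynomial, plain p6 for a purely polynomial approximation, and xN for the number of elements handled per loop iteration). All variants approximate the same scalar ELU computation, matching the reference math in the new elu-operator-tester.h below. A minimal illustrative sketch, not part of XNNPACK or of this diff:

// Illustrative reference only; not part of XNNPACK or of this diff.
#include <cmath>
#include <cstddef>
#include <cstdio>

// ELU: identity for non-negative inputs, alpha * (exp(x) - 1) otherwise.
void elu_reference_f32(std::size_t n, const float* x, float* y, float alpha) {
  for (std::size_t i = 0; i < n; i++) {
    const float v = x[i];
    y[i] = v >= 0.0f ? v : alpha * std::expm1(v);
  }
}

int main() {
  const float x[4] = {-2.0f, -0.5f, 0.0f, 3.0f};
  float y[4];
  elu_reference_f32(4, x, y, /*alpha=*/0.5f);
  for (float v : y) std::printf("%f\n", v);
  return 0;
}
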
diff --git a/test/elu-nc.cc b/test/elu-nc.cc
new file mode 100644
index 0000000..ba23bc7
--- /dev/null
+++ b/test/elu-nc.cc
@@ -0,0 +1,78 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <gtest/gtest.h>
+
+#include "elu-operator-tester.h"
+
+
+TEST(ELU_NC_F32, unit_batch) {
+  for (size_t channels = 1; channels < 100; channels++) {
+    ELUOperatorTester()
+      .batch_size(1)
+      .channels(channels)
+      .iterations(3)
+      .TestF32();
+  }
+}
+
+TEST(ELU_NC_F32, small_batch) {
+  for (size_t channels = 1; channels < 100; channels++) {
+    ELUOperatorTester()
+      .batch_size(3)
+      .channels(channels)
+      .iterations(3)
+      .TestF32();
+  }
+}
+
+TEST(ELU_NC_F32, small_batch_with_input_stride) {
+  for (size_t channels = 1; channels < 100; channels += 15) {
+    ELUOperatorTester()
+      .batch_size(3)
+      .channels(channels)
+      .input_stride(129)
+      .iterations(3)
+      .TestF32();
+  }
+}
+
+TEST(ELU_NC_F32, small_batch_with_output_stride) {
+  for (size_t channels = 1; channels < 100; channels += 15) {
+    ELUOperatorTester()
+      .batch_size(3)
+      .channels(channels)
+      .output_stride(117)
+      .iterations(3)
+      .TestF32();
+  }
+}
+
+TEST(ELU_NC_F32, small_batch_with_input_and_output_stride) {
+  for (size_t channels = 1; channels < 100; channels += 15) {
+    ELUOperatorTester()
+      .batch_size(3)
+      .channels(channels)
+      .input_stride(129)
+      .output_stride(117)
+      .iterations(3)
+      .TestF32();
+  }
+}
+
+TEST(ELU_NC_F32, small_batch_with_alpha) {
+  for (size_t batch_size = 1; batch_size <= 3; batch_size += 2) {
+    for (size_t channels = 1; channels < 100; channels += 15) {
+      for (float alpha = 1.0e-4f; alpha < 1.0f; alpha *= 3.14159265f) {
+        ELUOperatorTester()
+          .batch_size(batch_size)
+          .channels(channels)
+          .alpha(alpha)
+          .iterations(1)
+          .TestF32();
+      }
+    }
+  }
+}
diff --git a/test/elu-operator-tester.h b/test/elu-operator-tester.h
new file mode 100644
index 0000000..a673b62
--- /dev/null
+++ b/test/elu-operator-tester.h
@@ -0,0 +1,158 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstddef>
+#include <cstdlib>
+#include <functional>
+#include <random>
+#include <vector>
+
+#include <xnnpack.h>
+
+
+class ELUOperatorTester {
+ public:
+  inline ELUOperatorTester& channels(size_t channels) {
+    assert(channels != 0);
+    this->channels_ = channels;
+    return *this;
+  }
+
+  inline size_t channels() const {
+    return this->channels_;
+  }
+
+  inline ELUOperatorTester& input_stride(size_t input_stride) {
+    assert(input_stride != 0);
+    this->input_stride_ = input_stride;
+    return *this;
+  }
+
+  inline size_t input_stride() const {
+    if (this->input_stride_ == 0) {
+      return this->channels_;
+    } else {
+      assert(this->input_stride_ >= this->channels_);
+      return this->input_stride_;
+    }
+  }
+
+  inline ELUOperatorTester& output_stride(size_t output_stride) {
+    assert(output_stride != 0);
+    this->output_stride_ = output_stride;
+    return *this;
+  }
+
+  inline size_t output_stride() const {
+    if (this->output_stride_ == 0) {
+      return this->channels_;
+    } else {
+      assert(this->output_stride_ >= this->channels_);
+      return this->output_stride_;
+    }
+  }
+
+  inline ELUOperatorTester& batch_size(size_t batch_size) {
+    assert(batch_size != 0);
+    this->batch_size_ = batch_size;
+    return *this;
+  }
+
+  inline size_t batch_size() const {
+    return this->batch_size_;
+  }
+
+  inline ELUOperatorTester& alpha(float alpha) {
+    assert(alpha > 0.0f);
+    assert(alpha < 1.0f);
+    this->alpha_ = alpha;
+    return *this;
+  }
+
+  inline float alpha() const {
+    return this->alpha_;
+  }
+
+  inline ELUOperatorTester& iterations(size_t iterations) {
+    this->iterations_ = iterations;
+    return *this;
+  }
+
+  inline size_t iterations() const {
+    return this->iterations_;
+  }
+
+  void TestF32() const {
+    std::random_device random_device;
+    auto rng = std::mt19937(random_device());
+    auto f32rng = std::bind(std::uniform_real_distribution<float>(-20.0f, 20.0f), std::ref(rng));
+
+    std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + channels());
+    std::vector<float> output((batch_size() - 1) * output_stride() + channels());
+    std::vector<double> output_ref(batch_size() * channels());
+    for (size_t iteration = 0; iteration < iterations(); iteration++) {
+      std::generate(input.begin(), input.end(), std::ref(f32rng));
+      std::fill(output.begin(), output.end(), std::nanf(""));
+
+      // Compute reference results.
+      for (size_t i = 0; i < batch_size(); i++) {
+        for (size_t c = 0; c < channels(); c++) {
+          const double x = double(input[i * input_stride() + c]);
+          output_ref[i * channels() + c] = std::signbit(x) ? std::expm1(x) * alpha() : x;
+        }
+      }
+
+      // Create, setup, run, and destroy ELU operator.
+      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
+      xnn_operator_t elu_op = nullptr;
+
+      ASSERT_EQ(xnn_status_success,
+        xnn_create_elu_nc_f32(
+          channels(), input_stride(), output_stride(),
+          alpha(),
+          0, &elu_op));
+      ASSERT_NE(nullptr, elu_op);
+
+      // Smart pointer to automatically delete elu_op.
+      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_elu_op(elu_op, xnn_delete_operator);
+
+      ASSERT_EQ(xnn_status_success,
+        xnn_setup_elu_nc_f32(
+          elu_op,
+          batch_size(),
+          input.data(), output.data(),
+          nullptr /* thread pool */));
+
+      ASSERT_EQ(xnn_status_success,
+        xnn_run_operator(elu_op, nullptr /* thread pool */));
+
+      // Verify results.
+      for (size_t i = 0; i < batch_size(); i++) {
+        for (size_t c = 0; c < channels(); c++) {
+          ASSERT_NEAR(output[i * output_stride() + c],
+                      output_ref[i * channels() + c],
+                      std::abs(output_ref[i * channels() + c]) * 1.0e-5)
+            << "at batch " << i << " / " << batch_size() << ", channel " << c << " / " << channels()
+            << ", input " << input[i * input_stride() + c] << ", alpha " << alpha();
+        }
+      }
+    }
+  }
+
+ private:
+  size_t batch_size_{1};
+  size_t channels_{1};
+  size_t input_stride_{0};
+  size_t output_stride_{0};
+  float alpha_{0.5f};
+  size_t iterations_{15};
+};
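
The tester above pins down the operator-level API for the new ELU op: initialize the library once, create the operator with (channels, input stride, output stride, alpha, flags), set it up with a batch size and input/output pointers, run it, and delete it. A standalone sketch of that same sequence, with arbitrary example values and simplified error handling (not part of the diff):

#include <cstddef>
#include <vector>

#include <xnnpack.h>

int main() {
  if (xnn_initialize(nullptr /* allocator */) != xnn_status_success) return 1;

  const size_t channels = 16;
  const size_t batch_size = 4;
  std::vector<float> input(batch_size * channels, -1.5f);
  std::vector<float> output(batch_size * channels);

  // Create: channels, input stride, output stride, alpha, flags, output operator.
  xnn_operator_t elu_op = nullptr;
  if (xnn_create_elu_nc_f32(channels, channels, channels, 0.5f, 0, &elu_op) != xnn_status_success) return 1;

  // Setup binds the batch size and the input/output pointers.
  if (xnn_setup_elu_nc_f32(elu_op, batch_size, input.data(), output.data(), nullptr /* thread pool */) != xnn_status_success) return 1;

  // Run, then destroy.
  if (xnn_run_operator(elu_op, nullptr /* thread pool */) != xnn_status_success) return 1;
  xnn_delete_operator(elu_op);
  return 0;
}
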
diff --git a/test/f32-conv-hwc2chw.cc b/test/f32-conv-hwc2chw.cc
index 7d8f473..83c7e0d 100644
--- a/test/f32-conv-hwc2chw.cc
+++ b/test/f32-conv-hwc2chw.cc
@@ -12,6 +12,287 @@
 #include "conv-hwc2chw-microkernel-tester.h"
 
 
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, input_width_eq_4) {
+    TEST_REQUIRES_ARM_NEON;
+    ConvHWC2CHWMicrokernelTester()
+      .kernel_size(3)
+      .subsampling(2)
+      .padding_width(1)
+      .input_channels(3)
+      .output_channels_tile(4)
+      .output_channels(4)
+      .input_width(4)
+      .input_height(3)
+      .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, input_width_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t input_width = 8; input_width <= 32; input_width += 12) {
+      ConvHWC2CHWMicrokernelTester()
+        .kernel_size(3)
+        .subsampling(2)
+        .padding_width(1)
+        .input_channels(3)
+        .output_channels_tile(4)
+        .output_channels(4)
+        .input_width(input_width)
+        .input_height(3)
+        .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, input_width_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      ConvHWC2CHWMicrokernelTester()
+        .kernel_size(3)
+        .subsampling(2)
+        .padding_width(1)
+        .input_channels(3)
+        .output_channels_tile(4)
+        .output_channels(4)
+        .input_width(input_width)
+        .input_height(3)
+        .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, input_width_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t input_width = 5; input_width < 8; input_width++) {
+      ConvHWC2CHWMicrokernelTester()
+        .kernel_size(3)
+        .subsampling(2)
+        .padding_width(1)
+        .input_channels(3)
+        .output_channels_tile(4)
+        .output_channels(4)
+        .input_width(input_width)
+        .input_height(3)
+        .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, output_channels_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t output_channels = 1; output_channels < 4; output_channels++) {
+      for (size_t input_width = 1; input_width < 32; input_width += 7) {
+        ConvHWC2CHWMicrokernelTester()
+          .kernel_size(3)
+          .subsampling(2)
+          .padding_width(1)
+          .input_channels(3)
+          .output_channels_tile(4)
+          .output_channels(output_channels)
+          .input_width(input_width)
+          .input_height(3)
+          .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, output_channels_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t output_channels = 8; output_channels <= 16; output_channels += 4) {
+      for (size_t input_width = 1; input_width < 32; input_width += 7) {
+        ConvHWC2CHWMicrokernelTester()
+          .kernel_size(3)
+          .subsampling(2)
+          .padding_width(1)
+          .input_channels(3)
+          .output_channels_tile(4)
+          .output_channels(output_channels)
+          .input_width(input_width)
+          .input_height(3)
+          .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, output_channels_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t output_channels = 5; output_channels < 8; output_channels++) {
+      for (size_t input_width = 1; input_width < 32; input_width += 7) {
+        ConvHWC2CHWMicrokernelTester()
+          .kernel_size(3)
+          .subsampling(2)
+          .padding_width(1)
+          .input_channels(3)
+          .output_channels_tile(4)
+          .output_channels(output_channels)
+          .input_width(input_width)
+          .input_height(3)
+          .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, input_height_lt_3) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t output_channels = 1; output_channels < 8; output_channels += 3) {
+        for (size_t input_width = 1; input_width < 32; input_width += 7) {
+          ConvHWC2CHWMicrokernelTester()
+            .kernel_size(3)
+            .subsampling(2)
+            .padding(1)  // padded input height of at least 3 required
+            .input_channels(3)
+            .output_channels_tile(4)
+            .output_channels(output_channels)
+            .input_width(input_width)
+            .input_height(input_height)
+            .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, input_height_gt_3) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t input_height = 4; input_height <= 9; input_height++) {
+      for (size_t output_channels = 1; output_channels < 8; output_channels += 3) {
+        for (size_t input_width = 1; input_width < 32; input_width += 7) {
+          ConvHWC2CHWMicrokernelTester()
+            .kernel_size(3)
+            .subsampling(2)
+            .padding_width(1)
+            .input_channels(3)
+            .output_channels_tile(4)
+            .output_channels(output_channels)
+            .input_width(input_width)
+            .input_height(input_height)
+            .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, padding_top) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t padding_top = 0; padding_top <= 1; padding_top++) {
+      for (size_t output_channels = 1; output_channels < 16; output_channels += 7) {
+        for (size_t input_width = 1; input_width < 32; input_width += 7) {
+          ConvHWC2CHWMicrokernelTester()
+            .kernel_size(3)
+            .subsampling(2)
+            .padding_width(1)
+            .padding_top(padding_top)
+            .input_channels(3)
+            .output_channels_tile(4)
+            .output_channels(output_channels)
+            .input_width(input_width)
+            .input_height(9)
+            .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, padding_bottom) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t padding_bottom = 0; padding_bottom <= 1; padding_bottom++) {
+      for (size_t output_channels = 1; output_channels < 16; output_channels += 7) {
+        for (size_t input_width = 1; input_width < 32; input_width += 7) {
+          ConvHWC2CHWMicrokernelTester()
+            .kernel_size(3)
+            .subsampling(2)
+            .padding_width(1)
+            .padding_bottom(padding_bottom)
+            .input_channels(3)
+            .output_channels_tile(4)
+            .output_channels(output_channels)
+            .input_width(input_width)
+            .input_height(9)
+            .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, output_y_start) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t output_y_start = 1; output_y_start <= 3; output_y_start++) {
+      for (size_t output_channels = 1; output_channels < 8; output_channels += 3) {
+        for (size_t input_width = 1; input_width < 32; input_width += 7) {
+          ConvHWC2CHWMicrokernelTester()
+            .kernel_size(3)
+            .subsampling(2)
+            .padding_width(1)
+            .input_channels(3)
+            .output_channels_tile(4)
+            .output_channels(output_channels)
+            .input_width(input_width)
+            .input_height(9)
+            .output_y_start(output_y_start)
+            .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, output_y_end) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t output_y_end = 2; output_y_end < 5; output_y_end++) {
+      for (size_t output_channels = 1; output_channels < 8; output_channels += 3) {
+        for (size_t input_width = 1; input_width < 32; input_width += 7) {
+          ConvHWC2CHWMicrokernelTester()
+            .kernel_size(3)
+            .subsampling(2)
+            .padding_width(1)
+            .input_channels(3)
+            .output_channels_tile(4)
+            .output_channels(output_channels)
+            .input_width(input_width)
+            .input_height(9)
+            .output_y_end(output_y_end)
+            .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t output_channels = 1; output_channels < 8; output_channels += 3) {
+      for (size_t input_width = 1; input_width < 32; input_width += 7) {
+        ConvHWC2CHWMicrokernelTester()
+          .kernel_size(3)
+          .subsampling(2)
+          .padding_width(1)
+          .input_channels(3)
+          .output_channels_tile(4)
+          .output_channels(output_channels)
+          .input_width(input_width)
+          .input_height(6)
+          .qmin(128)
+          .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+      }
+    }
+  }
+
+  TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEON_2X2, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t output_channels = 1; output_channels < 8; output_channels += 3) {
+      for (size_t input_width = 1; input_width < 32; input_width += 7) {
+        ConvHWC2CHWMicrokernelTester()
+          .kernel_size(3)
+          .subsampling(2)
+          .padding_width(1)
+          .input_channels(3)
+          .output_channels_tile(4)
+          .output_channels(output_channels)
+          .input_width(input_width)
+          .input_height(6)
+          .qmax(128)
+          .Test(xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
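
In the new NEON tests above, the micro-kernel name 3x3s2p1c3x4__neon_2x2 appears to encode a 3x3 kernel, stride (subsampling) 2, padding 1, 3 input channels, a tile of 4 output channels, and 2x2 output rows by columns per iteration, which is why the tests sweep input widths and output channels around multiples of 4. The swept widths map to output widths through the usual strided-convolution size formula; a small sketch follows (treating padding_width(1) as one pixel per side is an assumption about the tester, not taken from the diff):

#include <cstddef>
#include <cstdio>

// output = floor((input + total_padding - kernel) / stride) + 1
std::size_t conv_output_dim(std::size_t input, std::size_t total_padding,
                            std::size_t kernel, std::size_t stride) {
  return (input + total_padding - kernel) / stride + 1;
}

int main() {
  // kernel_size(3), subsampling(2), padding_width(1) on each side (assumed).
  for (std::size_t input_width = 4; input_width <= 32; input_width += 4) {
    std::printf("input_width=%zu -> output_width=%zu\n",
                input_width, conv_output_dim(input_width, 2, 3, 2));
  }
  return 0;
}
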
 #if XNN_ARCH_ARM64
   TEST(F32_CONV_HWC2CHW_3X3S2P1C3X4__NEONFMA_2X2, input_width_eq_4) {
     TEST_REQUIRES_ARM_NEON_FMA;
diff --git a/test/f32-dwconv-minmax.cc b/test/f32-dwconv-minmax.cc
index d312c59..7facefe 100644
--- a/test/f32-dwconv-minmax.cc
+++ b/test/f32-dwconv-minmax.cc
@@ -13965,100 +13965,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, c_eq_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, c_eq_4) {
     DWConvMicrokernelTester()
       .cr(4)
       .kr(25)
       .channels(4)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, c_div_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, c_div_4) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, c_div_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, c_div_4_with_qmin) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, c_div_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, c_div_4_with_qmax) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, c_lt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, c_lt_4) {
     for (uint32_t channels = 1; channels < 4; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, c_gt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, c_gt_4) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, c_gt_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, c_gt_4_with_qmin) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, c_gt_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, c_gt_4_with_qmax) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       for (size_t step = 2; step <= 25; step++) {
         DWConvMicrokernelTester()
@@ -14067,12 +14067,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -14080,11 +14080,11 @@
         .channels(4)
         .width(5)
         .output_stride(23)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -14092,11 +14092,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -14104,22 +14104,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, input_offset) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .input_offset(112)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_ARM, zero) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ARM_ACC2, zero) {
     for (uint32_t mz = 0; mz < 25; mz++) {
       for (uint32_t channels = 8; channels < 64; channels += 12) {
         DWConvMicrokernelTester()
@@ -14128,7 +14128,7 @@
           .channels(channels)
           .input_offset(112)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2);
       }
     }
   }
@@ -14307,100 +14307,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, c_eq_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, c_eq_8) {
     DWConvMicrokernelTester()
       .cr(8)
       .kr(25)
       .channels(8)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, c_div_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, c_div_8) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, c_div_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, c_div_8_with_qmin) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, c_div_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, c_div_8_with_qmax) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, c_lt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, c_lt_8) {
     for (uint32_t channels = 1; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, c_gt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, c_gt_8) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, c_gt_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, c_gt_8_with_qmin) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, c_gt_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, c_gt_8_with_qmax) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       for (size_t step = 2; step <= 25; step++) {
         DWConvMicrokernelTester()
@@ -14409,12 +14409,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -14422,11 +14422,11 @@
         .channels(8)
         .width(5)
         .output_stride(43)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -14434,11 +14434,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -14446,22 +14446,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, input_offset) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .input_offset(176)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_ARM, zero) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ARM_ACC2, zero) {
     for (uint32_t mz = 0; mz < 25; mz++) {
       for (uint32_t channels = 16; channels < 128; channels += 24) {
         DWConvMicrokernelTester()
@@ -14470,7 +14470,7 @@
           .channels(channels)
           .input_offset(176)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2);
       }
     }
   }
@@ -14649,100 +14649,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, c_eq_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, c_eq_4) {
     DWConvMicrokernelTester()
       .cr(4)
       .kr(25)
       .channels(4)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, c_div_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, c_div_4) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, c_div_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, c_div_4_with_qmin) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, c_div_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, c_div_4_with_qmax) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, c_lt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, c_lt_4) {
     for (uint32_t channels = 1; channels < 4; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, c_gt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, c_gt_4) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, c_gt_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, c_gt_4_with_qmin) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, c_gt_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, c_gt_4_with_qmax) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       for (size_t step = 2; step <= 25; step++) {
         DWConvMicrokernelTester()
@@ -14751,12 +14751,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -14764,11 +14764,11 @@
         .channels(4)
         .width(5)
         .output_stride(23)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -14776,11 +14776,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -14788,22 +14788,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, input_offset) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(25)
         .channels(channels)
         .input_offset(112)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_ACC2_X86, zero) {
+  TEST(F32_DWCONV_MINMAX_UP4X25__WASMSIMD_X86_ACC2, zero) {
     for (uint32_t mz = 0; mz < 25; mz++) {
       for (uint32_t channels = 8; channels < 64; channels += 12) {
         DWConvMicrokernelTester()
@@ -14812,7 +14812,7 @@
           .channels(channels)
           .input_offset(112)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2);
       }
     }
   }
@@ -14991,100 +14991,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, c_eq_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, c_eq_8) {
     DWConvMicrokernelTester()
       .cr(8)
       .kr(25)
       .channels(8)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, c_div_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, c_div_8) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, c_div_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, c_div_8_with_qmin) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, c_div_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, c_div_8_with_qmax) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, c_lt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, c_lt_8) {
     for (uint32_t channels = 1; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, c_gt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, c_gt_8) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, c_gt_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, c_gt_8_with_qmin) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, c_gt_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, c_gt_8_with_qmax) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       for (size_t step = 2; step <= 25; step++) {
         DWConvMicrokernelTester()
@@ -15093,12 +15093,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -15106,11 +15106,11 @@
         .channels(8)
         .width(5)
         .output_stride(43)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -15118,11 +15118,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -15130,22 +15130,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, input_offset) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(25)
         .channels(channels)
         .input_offset(176)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_ACC2_X86, zero) {
+  TEST(F32_DWCONV_MINMAX_UP8X25__WASMSIMD_X86_ACC2, zero) {
     for (uint32_t mz = 0; mz < 25; mz++) {
       for (uint32_t channels = 16; channels < 128; channels += 24) {
         DWConvMicrokernelTester()
@@ -15154,7 +15154,7 @@
           .channels(channels)
           .input_offset(176)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2);
       }
     }
   }
@@ -15333,100 +15333,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, c_eq_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, c_eq_4) {
     DWConvMicrokernelTester()
       .cr(4)
       .kr(9)
       .channels(4)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, c_div_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, c_div_4) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, c_div_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, c_div_4_with_qmin) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, c_div_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, c_div_4_with_qmax) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, c_lt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, c_lt_4) {
     for (uint32_t channels = 1; channels < 4; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, c_gt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, c_gt_4) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, c_gt_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, c_gt_4_with_qmin) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, c_gt_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, c_gt_4_with_qmax) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       for (size_t step = 2; step <= 9; step++) {
         DWConvMicrokernelTester()
@@ -15435,12 +15435,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -15448,11 +15448,11 @@
         .channels(4)
         .width(5)
         .output_stride(23)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -15460,11 +15460,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -15472,22 +15472,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, input_offset) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .input_offset(112)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_ARM, zero) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, zero) {
     for (uint32_t mz = 0; mz < 9; mz++) {
       for (uint32_t channels = 8; channels < 64; channels += 12) {
         DWConvMicrokernelTester()
@@ -15496,7 +15496,7 @@
           .channels(channels)
           .input_offset(112)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
       }
     }
   }
@@ -15675,100 +15675,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, c_eq_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, c_eq_8) {
     DWConvMicrokernelTester()
       .cr(8)
       .kr(9)
       .channels(8)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, c_div_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, c_div_8) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, c_div_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, c_div_8_with_qmin) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, c_div_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, c_div_8_with_qmax) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, c_lt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, c_lt_8) {
     for (uint32_t channels = 1; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, c_gt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, c_gt_8) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, c_gt_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, c_gt_8_with_qmin) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, c_gt_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, c_gt_8_with_qmax) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       for (size_t step = 2; step <= 9; step++) {
         DWConvMicrokernelTester()
@@ -15777,12 +15777,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -15790,11 +15790,11 @@
         .channels(8)
         .width(5)
         .output_stride(43)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -15802,11 +15802,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -15814,22 +15814,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, input_offset) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .input_offset(176)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_ARM, zero) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ARM_ACC2, zero) {
     for (uint32_t mz = 0; mz < 9; mz++) {
       for (uint32_t channels = 16; channels < 128; channels += 24) {
         DWConvMicrokernelTester()
@@ -15838,7 +15838,7 @@
           .channels(channels)
           .input_offset(176)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2);
       }
     }
   }
@@ -16017,100 +16017,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, c_eq_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, c_eq_4) {
     DWConvMicrokernelTester()
       .cr(4)
       .kr(9)
       .channels(4)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, c_div_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, c_div_4) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, c_div_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, c_div_4_with_qmin) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, c_div_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, c_div_4_with_qmax) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, c_lt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, c_lt_4) {
     for (uint32_t channels = 1; channels < 4; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, c_gt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, c_gt_4) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, c_gt_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, c_gt_4_with_qmin) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, c_gt_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, c_gt_4_with_qmax) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       for (size_t step = 2; step <= 9; step++) {
         DWConvMicrokernelTester()
@@ -16119,12 +16119,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -16132,11 +16132,11 @@
         .channels(4)
         .width(5)
         .output_stride(23)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -16144,11 +16144,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -16156,22 +16156,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, input_offset) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(9)
         .channels(channels)
         .input_offset(112)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ACC2_X86, zero) {
+  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_X86_ACC2, zero) {
     for (uint32_t mz = 0; mz < 9; mz++) {
       for (uint32_t channels = 8; channels < 64; channels += 12) {
         DWConvMicrokernelTester()
@@ -16180,7 +16180,7 @@
           .channels(channels)
           .input_offset(112)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2);
       }
     }
   }
@@ -16359,100 +16359,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, c_eq_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, c_eq_8) {
     DWConvMicrokernelTester()
       .cr(8)
       .kr(9)
       .channels(8)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, c_div_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, c_div_8) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, c_div_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, c_div_8_with_qmin) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, c_div_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, c_div_8_with_qmax) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, c_lt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, c_lt_8) {
     for (uint32_t channels = 1; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, c_gt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, c_gt_8) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, c_gt_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, c_gt_8_with_qmin) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, c_gt_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, c_gt_8_with_qmax) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       for (size_t step = 2; step <= 9; step++) {
         DWConvMicrokernelTester()
@@ -16461,12 +16461,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -16474,11 +16474,11 @@
         .channels(8)
         .width(5)
         .output_stride(43)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -16486,11 +16486,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -16498,22 +16498,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, input_offset) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(9)
         .channels(channels)
         .input_offset(176)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_ACC2_X86, zero) {
+  TEST(F32_DWCONV_MINMAX_UP8X9__WASMSIMD_X86_ACC2, zero) {
     for (uint32_t mz = 0; mz < 9; mz++) {
       for (uint32_t channels = 16; channels < 128; channels += 24) {
         DWConvMicrokernelTester()
@@ -16522,7 +16522,7 @@
           .channels(channels)
           .input_offset(176)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2);
       }
     }
   }
@@ -16701,100 +16701,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, c_eq_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, c_eq_4) {
     DWConvMicrokernelTester()
       .cr(4)
       .kr(4)
       .channels(4)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, c_div_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, c_div_4) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, c_div_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, c_div_4_with_qmin) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, c_div_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, c_div_4_with_qmax) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, c_lt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, c_lt_4) {
     for (uint32_t channels = 1; channels < 4; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, c_gt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, c_gt_4) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, c_gt_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, c_gt_4_with_qmin) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, c_gt_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, c_gt_4_with_qmax) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       for (size_t step = 2; step <= 4; step++) {
         DWConvMicrokernelTester()
@@ -16803,12 +16803,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -16816,11 +16816,11 @@
         .channels(4)
         .width(5)
         .output_stride(23)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -16828,11 +16828,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -16840,22 +16840,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, input_offset) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .input_offset(112)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_ARM, zero) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ARM_ACC2, zero) {
     for (uint32_t mz = 0; mz < 4; mz++) {
       for (uint32_t channels = 8; channels < 64; channels += 12) {
         DWConvMicrokernelTester()
@@ -16864,7 +16864,7 @@
           .channels(channels)
           .input_offset(112)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2);
       }
     }
   }
@@ -17043,100 +17043,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, c_eq_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, c_eq_8) {
     DWConvMicrokernelTester()
       .cr(8)
       .kr(4)
       .channels(8)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, c_div_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, c_div_8) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, c_div_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, c_div_8_with_qmin) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, c_div_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, c_div_8_with_qmax) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, c_lt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, c_lt_8) {
     for (uint32_t channels = 1; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, c_gt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, c_gt_8) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, c_gt_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, c_gt_8_with_qmin) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, c_gt_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, c_gt_8_with_qmax) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       for (size_t step = 2; step <= 4; step++) {
         DWConvMicrokernelTester()
@@ -17145,12 +17145,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -17158,11 +17158,11 @@
         .channels(8)
         .width(5)
         .output_stride(43)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -17170,11 +17170,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -17182,22 +17182,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, input_offset) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .input_offset(176)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_ARM, zero) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ARM_ACC2, zero) {
     for (uint32_t mz = 0; mz < 4; mz++) {
       for (uint32_t channels = 16; channels < 128; channels += 24) {
         DWConvMicrokernelTester()
@@ -17206,7 +17206,7 @@
           .channels(channels)
           .input_offset(176)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2);
       }
     }
   }
@@ -17385,100 +17385,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, c_eq_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, c_eq_4) {
     DWConvMicrokernelTester()
       .cr(4)
       .kr(4)
       .channels(4)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, c_div_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, c_div_4) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, c_div_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, c_div_4_with_qmin) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, c_div_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, c_div_4_with_qmax) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, c_lt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, c_lt_4) {
     for (uint32_t channels = 1; channels < 4; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, c_gt_4) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, c_gt_4) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, c_gt_4_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, c_gt_4_with_qmin) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, c_gt_4_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, c_gt_4_with_qmax) {
     for (uint32_t channels = 5; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       for (size_t step = 2; step <= 4; step++) {
         DWConvMicrokernelTester()
@@ -17487,12 +17487,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -17500,11 +17500,11 @@
         .channels(4)
         .width(5)
         .output_stride(23)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -17512,11 +17512,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 20; channels += 3) {
       DWConvMicrokernelTester()
         .cr(4)
@@ -17524,22 +17524,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, input_offset) {
     for (uint32_t channels = 8; channels < 64; channels += 12) {
       DWConvMicrokernelTester()
         .cr(4)
         .kr(4)
         .channels(channels)
         .input_offset(112)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_ACC2_X86, zero) {
+  TEST(F32_DWCONV_MINMAX_UP4X4__WASMSIMD_X86_ACC2, zero) {
     for (uint32_t mz = 0; mz < 4; mz++) {
       for (uint32_t channels = 8; channels < 64; channels += 12) {
         DWConvMicrokernelTester()
@@ -17548,7 +17548,7 @@
           .channels(channels)
           .input_offset(112)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2);
       }
     }
   }
@@ -17727,100 +17727,100 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, c_eq_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, c_eq_8) {
     DWConvMicrokernelTester()
       .cr(8)
       .kr(4)
       .channels(8)
-      .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+      .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, c_div_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, c_div_8) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, c_div_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, c_div_8_with_qmin) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, c_div_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, c_div_8_with_qmax) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, c_lt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, c_lt_8) {
     for (uint32_t channels = 1; channels < 8; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, c_gt_8) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, c_gt_8) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, c_gt_8_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, c_gt_8_with_qmin) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, c_gt_8_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, c_gt_8_with_qmax) {
     for (uint32_t channels = 9; channels < 16; channels++) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, multipixel) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, multipixel) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .width(3)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, multipixel_with_step) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, multipixel_with_step) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       for (size_t step = 2; step <= 4; step++) {
         DWConvMicrokernelTester()
@@ -17829,12 +17829,12 @@
           .channels(channels)
           .width(3)
           .step(step)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, multipixel_with_output_stride) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, multipixel_with_output_stride) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -17842,11 +17842,11 @@
         .channels(8)
         .width(5)
         .output_stride(43)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, multipixel_with_qmin) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, multipixel_with_qmin) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -17854,11 +17854,11 @@
         .channels(channels)
         .width(3)
         .qmin(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, multipixel_with_qmax) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, multipixel_with_qmax) {
     for (size_t channels = 1; channels <= 40; channels += 7) {
       DWConvMicrokernelTester()
         .cr(8)
@@ -17866,22 +17866,22 @@
         .channels(channels)
         .width(3)
         .qmax(128)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, input_offset) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, input_offset) {
     for (uint32_t channels = 16; channels < 128; channels += 24) {
       DWConvMicrokernelTester()
         .cr(8)
         .kr(4)
         .channels(channels)
         .input_offset(176)
-        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+        .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
     }
   }
 
-  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_ACC2_X86, zero) {
+  TEST(F32_DWCONV_MINMAX_UP8X4__WASMSIMD_X86_ACC2, zero) {
     for (uint32_t mz = 0; mz < 4; mz++) {
       for (uint32_t channels = 16; channels < 128; channels += 24) {
         DWConvMicrokernelTester()
@@ -17890,7 +17890,7 @@
           .channels(channels)
           .input_offset(176)
           .zero_index(mz)
-          .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86);
+          .Test(xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2);
       }
     }
   }
diff --git a/test/f32-dwconv-minmax.yaml b/test/f32-dwconv-minmax.yaml
index fdaccc7..53f0af8 100644
--- a/test/f32-dwconv-minmax.yaml
+++ b/test/f32-dwconv-minmax.yaml
@@ -80,29 +80,29 @@
 - name: xnn_f32_dwconv_minmax_ukernel_up32x4__avx512f
 - name: xnn_f32_dwconv_minmax_ukernel_up32x4__avx512f_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm
-- name: xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_arm
+- name: xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_arm_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm
-- name: xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_arm
+- name: xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_arm_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86
-- name: xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_acc2_x86
+- name: xnn_f32_dwconv_minmax_ukernel_up4x25__wasmsimd_x86_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86
-- name: xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_acc2_x86
+- name: xnn_f32_dwconv_minmax_ukernel_up8x25__wasmsimd_x86_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm
-- name: xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_arm
+- name: xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm
-- name: xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_arm
+- name: xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_arm_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86
-- name: xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_acc2_x86
+- name: xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_x86_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86
-- name: xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_acc2_x86
+- name: xnn_f32_dwconv_minmax_ukernel_up8x9__wasmsimd_x86_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm
-- name: xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_arm
+- name: xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm
-- name: xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_arm
+- name: xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86
-- name: xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_acc2_x86
+- name: xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86
-- name: xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_acc2_x86
+- name: xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up1x4__wasm
 - name: xnn_f32_dwconv_minmax_ukernel_up1x4__wasm_acc2
 - name: xnn_f32_dwconv_minmax_ukernel_up2x4__wasm
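
Illustrative sketch only, not part of this patch: the hunks above are a mechanical rename in which the accumulator tag now follows the ISA-variant tag (e.g. ..._wasmsimd_arm_acc2 instead of ..._wasmsimd_acc2_arm). The snippet below mirrors one of the generated tests with the renamed symbol; the test name "smoke" and the exact include paths are assumptions, everything else is taken from the surrounding diff.

// Hypothetical, minimal example -- assumes it is compiled inside XNNPACK's
// test tree, where these headers and the renamed microkernel are available.
#include <gtest/gtest.h>
#include <xnnpack/common.h>   // XNN_ARCH_WASMSIMD (path is an assumption)
#include <xnnpack/dwconv.h>   // microkernel declaration (path is an assumption)
#include "dwconv-microkernel-tester.h"

#if XNN_ARCH_WASMSIMD
  // Renamed symbol: accumulator suffix (acc2) now comes after the ISA variant (arm).
  TEST(F32_DWCONV_MINMAX_UP4X9__WASMSIMD_ARM_ACC2, smoke) {
    DWConvMicrokernelTester()
      .cr(4)        // channel tile of the up4x9 kernel
      .kr(9)        // number of filter taps
      .channels(4)  // exactly one channel tile
      .Test(xnn_f32_dwconv_minmax_ukernel_up4x9__wasmsimd_arm_acc2);
  }
#endif  // XNN_ARCH_WASMSIMD
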
diff --git a/test/f32-dwconv2d-chw.cc b/test/f32-dwconv2d-chw.cc
index 7b03d03..80b3e82 100644
--- a/test/f32-dwconv2d-chw.cc
+++ b/test/f32-dwconv2d-chw.cc
@@ -16732,7 +16732,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(1)
@@ -16743,10 +16743,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -16758,11 +16758,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -16774,11 +16774,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -16790,11 +16790,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4, output_height_gt_1) {
     for (size_t input_height = 2; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -16807,7 +16807,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4);
       }
     }
   }
@@ -16815,7 +16815,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(2)
@@ -16826,10 +16826,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -16841,11 +16841,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -16857,11 +16857,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -16873,11 +16873,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4, output_height_div_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4, output_height_div_2) {
     for (size_t input_height = 4; input_height < 16; input_height += 2) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -16890,12 +16890,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4, output_height_lt_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4, output_height_lt_2) {
     for (size_t input_height = 1; input_height < 2; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -16908,12 +16908,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4, output_height_gt_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4, output_height_gt_2) {
     for (size_t input_height = 3; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -16926,7 +16926,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4);
       }
     }
   }
@@ -16934,7 +16934,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_3X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_3X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(3)
@@ -16945,10 +16945,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_3X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_3X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -16960,11 +16960,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_3X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_3X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -16976,11 +16976,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_3X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_3X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -16992,11 +16992,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_3X4, output_height_div_3) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_3X4, output_height_div_3) {
     for (size_t input_height = 6; input_height < 24; input_height += 3) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17009,12 +17009,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_3X4, output_height_lt_3) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_3X4, output_height_lt_3) {
     for (size_t input_height = 1; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17027,12 +17027,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_3X4, output_height_gt_3) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_3X4, output_height_gt_3) {
     for (size_t input_height = 4; input_height < 7; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17045,7 +17045,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4);
       }
     }
   }
@@ -17053,7 +17053,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_4X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_4X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(4)
@@ -17064,10 +17064,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_4X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_4X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17079,11 +17079,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_4X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_4X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17095,11 +17095,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_4X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_4X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17111,11 +17111,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_4X4, output_height_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_4X4, output_height_div_4) {
     for (size_t input_height = 8; input_height < 32; input_height += 4) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17128,12 +17128,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_4X4, output_height_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_4X4, output_height_lt_4) {
     for (size_t input_height = 1; input_height < 4; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17146,12 +17146,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_4X4, output_height_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_4X4, output_height_gt_4) {
     for (size_t input_height = 5; input_height < 9; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17164,7 +17164,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4);
       }
     }
   }
@@ -17172,7 +17172,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_5X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_5X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(5)
@@ -17183,10 +17183,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_5X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_5X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17198,11 +17198,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_5X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_5X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17214,11 +17214,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_5X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_5X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17230,11 +17230,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_5X4, output_height_div_5) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_5X4, output_height_div_5) {
     for (size_t input_height = 10; input_height < 40; input_height += 5) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17247,12 +17247,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_5X4, output_height_lt_5) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_5X4, output_height_lt_5) {
     for (size_t input_height = 1; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17265,12 +17265,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_5X4, output_height_gt_5) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_5X4, output_height_gt_5) {
     for (size_t input_height = 6; input_height < 11; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17283,7 +17283,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4);
       }
     }
   }
@@ -17291,7 +17291,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_6X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_6X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(6)
@@ -17302,10 +17302,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_6X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_6X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17317,11 +17317,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_6X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_6X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17333,11 +17333,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_6X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_6X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17349,11 +17349,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_6X4, output_height_div_6) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_6X4, output_height_div_6) {
     for (size_t input_height = 12; input_height < 48; input_height += 6) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17366,12 +17366,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_6X4, output_height_lt_6) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_6X4, output_height_lt_6) {
     for (size_t input_height = 1; input_height < 6; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17384,12 +17384,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_6X4, output_height_gt_6) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_6X4, output_height_gt_6) {
     for (size_t input_height = 7; input_height < 13; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17402,7 +17402,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4);
       }
     }
   }
@@ -17410,7 +17410,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC2, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(1)
@@ -17421,10 +17421,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC2, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17436,11 +17436,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC2, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17452,11 +17452,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC2, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17468,11 +17468,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC2, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_height_gt_1) {
     for (size_t input_height = 2; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17485,7 +17485,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2);
       }
     }
   }
@@ -17493,7 +17493,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC3, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(1)
@@ -17504,10 +17504,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC3, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17519,11 +17519,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC3, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17535,11 +17535,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC3, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17551,11 +17551,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC3, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_height_gt_1) {
     for (size_t input_height = 2; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17568,7 +17568,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3);
       }
     }
   }
@@ -17576,7 +17576,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(1)
@@ -17587,10 +17587,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17602,11 +17602,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17618,11 +17618,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17634,11 +17634,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_1X4_ACC4, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_height_gt_1) {
     for (size_t input_height = 2; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17651,7 +17651,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4);
       }
     }
   }
@@ -17659,7 +17659,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4_ACC2, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(2)
@@ -17670,10 +17670,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4_ACC2, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17685,11 +17685,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4_ACC2, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17701,11 +17701,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4_ACC2, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17717,11 +17717,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4_ACC2, output_height_div_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_div_2) {
     for (size_t input_height = 4; input_height < 16; input_height += 2) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17734,12 +17734,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4_ACC2, output_height_lt_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_lt_2) {
     for (size_t input_height = 1; input_height < 2; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17752,12 +17752,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_2X4_ACC2, output_height_gt_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_gt_2) {
     for (size_t input_height = 3; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17770,7 +17770,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2);
       }
     }
   }
@@ -17778,7 +17778,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(1)
@@ -17789,10 +17789,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17804,11 +17804,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17820,11 +17820,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17836,11 +17836,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4, output_height_gt_1) {
     for (size_t input_height = 2; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17853,7 +17853,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4);
       }
     }
   }
@@ -17861,7 +17861,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(2)
@@ -17872,10 +17872,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17887,11 +17887,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -17903,11 +17903,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -17919,11 +17919,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4, output_height_div_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4, output_height_div_2) {
     for (size_t input_height = 4; input_height < 16; input_height += 2) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17936,12 +17936,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4, output_height_lt_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4, output_height_lt_2) {
     for (size_t input_height = 1; input_height < 2; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17954,12 +17954,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4, output_height_gt_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4, output_height_gt_2) {
     for (size_t input_height = 3; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -17972,7 +17972,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4);
       }
     }
   }
@@ -17980,7 +17980,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_3X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_3X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(3)
@@ -17991,10 +17991,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_3X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_3X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18006,11 +18006,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_3X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_3X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -18022,11 +18022,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_3X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_3X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18038,11 +18038,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_3X4, output_height_div_3) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_3X4, output_height_div_3) {
     for (size_t input_height = 6; input_height < 24; input_height += 3) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18055,12 +18055,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_3X4, output_height_lt_3) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_3X4, output_height_lt_3) {
     for (size_t input_height = 1; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18073,12 +18073,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_3X4, output_height_gt_3) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_3X4, output_height_gt_3) {
     for (size_t input_height = 4; input_height < 7; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18091,7 +18091,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4);
       }
     }
   }
@@ -18099,7 +18099,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_4X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_4X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(4)
@@ -18110,10 +18110,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_4X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_4X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18125,11 +18125,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_4X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_4X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -18141,11 +18141,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_4X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_4X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18157,11 +18157,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_4X4, output_height_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_4X4, output_height_div_4) {
     for (size_t input_height = 8; input_height < 32; input_height += 4) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18174,12 +18174,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_4X4, output_height_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_4X4, output_height_lt_4) {
     for (size_t input_height = 1; input_height < 4; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18192,12 +18192,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_4X4, output_height_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_4X4, output_height_gt_4) {
     for (size_t input_height = 5; input_height < 9; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18210,7 +18210,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4);
       }
     }
   }
@@ -18218,7 +18218,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_5X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_5X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(5)
@@ -18229,10 +18229,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_5X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_5X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18244,11 +18244,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_5X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_5X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -18260,11 +18260,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_5X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_5X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18276,11 +18276,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_5X4, output_height_div_5) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_5X4, output_height_div_5) {
     for (size_t input_height = 10; input_height < 40; input_height += 5) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18293,12 +18293,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_5X4, output_height_lt_5) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_5X4, output_height_lt_5) {
     for (size_t input_height = 1; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18311,12 +18311,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_5X4, output_height_gt_5) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_5X4, output_height_gt_5) {
     for (size_t input_height = 6; input_height < 11; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18329,7 +18329,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4);
       }
     }
   }
@@ -18337,7 +18337,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_6X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_6X4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(6)
@@ -18348,10 +18348,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_6X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_6X4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18363,11 +18363,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_6X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_6X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -18379,11 +18379,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_6X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_6X4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18395,11 +18395,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_6X4, output_height_div_6) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_6X4, output_height_div_6) {
     for (size_t input_height = 12; input_height < 48; input_height += 6) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18412,12 +18412,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_6X4, output_height_lt_6) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_6X4, output_height_lt_6) {
     for (size_t input_height = 1; input_height < 6; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18430,12 +18430,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_6X4, output_height_gt_6) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_6X4, output_height_gt_6) {
     for (size_t input_height = 7; input_height < 13; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18448,7 +18448,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4);
       }
     }
   }
@@ -18456,7 +18456,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC2, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(1)
@@ -18467,10 +18467,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC2, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18482,11 +18482,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC2, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -18498,11 +18498,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC2, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18514,11 +18514,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC2, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_height_gt_1) {
     for (size_t input_height = 2; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18531,7 +18531,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2);
       }
     }
   }
@@ -18539,7 +18539,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC3, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(1)
@@ -18550,10 +18550,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC3, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18565,11 +18565,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC3, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -18581,11 +18581,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC3, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18597,11 +18597,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC3, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_height_gt_1) {
     for (size_t input_height = 2; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18614,7 +18614,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3);
       }
     }
   }
@@ -18622,7 +18622,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(1)
@@ -18633,10 +18633,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18648,11 +18648,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -18664,11 +18664,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18680,11 +18680,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_1X4_ACC4, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_height_gt_1) {
     for (size_t input_height = 2; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18697,7 +18697,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4);
       }
     }
   }
@@ -18705,7 +18705,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4_ACC2, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(2)
@@ -18716,10 +18716,10 @@
       .padding_right(1)
       .padding_top(1)
       .padding_bottom(1)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2);
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4_ACC2, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18731,11 +18731,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4_ACC2, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -18747,11 +18747,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4_ACC2, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18763,11 +18763,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4_ACC2, output_height_div_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_div_2) {
     for (size_t input_height = 4; input_height < 16; input_height += 2) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18780,12 +18780,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4_ACC2, output_height_lt_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_lt_2) {
     for (size_t input_height = 1; input_height < 2; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18798,12 +18798,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_2X4_ACC2, output_height_gt_2) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_gt_2) {
     for (size_t input_height = 3; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -18816,7 +18816,7 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2);
       }
     }
   }
@@ -18824,7 +18824,2099 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_1X4_ACC3, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_5X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(5)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_5X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_5X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_5X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_5X4, output_height_div_5) {
+    for (size_t input_height = 10; input_height < 40; input_height += 5) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_5X4, output_height_lt_5) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_5X4, output_height_gt_5) {
+    for (size_t input_height = 6; input_height < 11; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_6X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(6)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_6X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_6X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_6X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_6X4, output_height_div_6) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_6X4, output_height_lt_6) {
+    for (size_t input_height = 1; input_height < 6; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_6X4, output_height_gt_6) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_3X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_4X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_5X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(5)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_5X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_5X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_5X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_5X4, output_height_div_5) {
+    for (size_t input_height = 10; input_height < 40; input_height += 5) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_5X4, output_height_lt_5) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_5X4, output_height_gt_5) {
+    for (size_t input_height = 6; input_height < 11; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_6X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(6)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_6X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_6X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_6X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_6X4, output_height_div_6) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_6X4, output_height_lt_6) {
+    for (size_t input_height = 1; input_height < 6; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_6X4, output_height_gt_6) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(3)
+      .kernel_width(3)
+      .subsampling(1)
+      .padding_left(1)
+      .padding_right(1)
+      .padding_top(1)
+      .padding_bottom(1)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(1)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(1)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_eq_4) {
     for (size_t input_width = 7; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18836,11 +20928,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_1X4_ACC3, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_div_4) {
     for (size_t input_width = 16; input_width < 64; input_width += 8) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18852,11 +20944,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_1X4_ACC3, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 7; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(8)
@@ -18868,11 +20960,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_1X4_ACC3, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_gt_4) {
     for (size_t input_width = 9; input_width < 17; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -18884,11 +20976,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_1X4_ACC3, output_height_eq_1) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_height_eq_1) {
     for (size_t input_height = 1; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 41; input_width += 7) {
         DWConv2DMicrokernelTester()
@@ -18901,12 +20993,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_1X4_ACC3, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4, output_height_gt_1) {
     for (size_t input_height = 3; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 41; input_width += 7) {
         DWConv2DMicrokernelTester()
@@ -18919,12 +21011,12 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_1X4_ACC3, padding_top_eq_1) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4, padding_top_eq_1) {
     for (size_t input_height = 2; input_height < 8; input_height++) {
       for (size_t input_width = 1; input_width < 41; input_width += 7) {
         DWConv2DMicrokernelTester()
@@ -18937,7 +21029,7 @@
           .padding_right(1)
           .padding_top(0)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4);
       }
     }
   }
@@ -18945,251 +21037,59 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_3X4, output_width_eq_4) {
-    DWConv2DMicrokernelTester()
-      .input_width(4)
-      .input_height(3)
-      .kernel_height(5)
-      .kernel_width(5)
-      .subsampling(1)
-      .padding_left(2)
-      .padding_right(2)
-      .padding_top(2)
-      .padding_bottom(2)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4);
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_3X4, output_width_div_4) {
-    for (size_t input_width = 8; input_width < 32; input_width += 4) {
-      DWConv2DMicrokernelTester()
-        .input_width(input_width)
-        .input_height(3)
-        .kernel_height(5)
-        .kernel_width(5)
-        .subsampling(1)
-        .padding_left(2)
-        .padding_right(2)
-        .padding_top(2)
-        .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4);
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_3X4, output_width_lt_4) {
-    for (size_t input_width = 1; input_width < 4; input_width++) {
-      DWConv2DMicrokernelTester()
-        .input_width(4)
-        .input_height(3)
-        .kernel_height(5)
-        .kernel_width(5)
-        .subsampling(1)
-        .padding_left(2)
-        .padding_right(2)
-        .padding_top(2)
-        .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4);
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_3X4, output_width_gt_4) {
-    for (size_t input_width = 5; input_width < 9; input_width++) {
-      DWConv2DMicrokernelTester()
-        .input_width(input_width)
-        .input_height(3)
-        .kernel_height(5)
-        .kernel_width(5)
-        .subsampling(1)
-        .padding_left(2)
-        .padding_right(2)
-        .padding_top(2)
-        .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4);
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_3X4, output_height_div_3) {
-    for (size_t input_height = 6; input_height < 24; input_height += 3) {
-      for (size_t input_width = 1; input_width < 21; input_width += 3) {
-        DWConv2DMicrokernelTester()
-          .input_width(input_width)
-          .input_height(input_height)
-          .kernel_height(5)
-          .kernel_width(5)
-          .subsampling(1)
-          .padding_left(2)
-          .padding_right(2)
-          .padding_top(2)
-          .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4);
-      }
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_3X4, output_height_lt_3) {
-    for (size_t input_height = 1; input_height < 3; input_height++) {
-      for (size_t input_width = 1; input_width < 21; input_width += 3) {
-        DWConv2DMicrokernelTester()
-          .input_width(input_width)
-          .input_height(input_height)
-          .kernel_height(5)
-          .kernel_width(5)
-          .subsampling(1)
-          .padding_left(2)
-          .padding_right(2)
-          .padding_top(2)
-          .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4);
-      }
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_3X4, output_height_gt_3) {
-    for (size_t input_height = 4; input_height < 7; input_height++) {
-      for (size_t input_width = 1; input_width < 21; input_width += 3) {
-        DWConv2DMicrokernelTester()
-          .input_width(input_width)
-          .input_height(input_height)
-          .kernel_height(5)
-          .kernel_width(5)
-          .subsampling(1)
-          .padding_left(2)
-          .padding_right(2)
-          .padding_top(2)
-          .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4);
-      }
-    }
-  }
-#endif  // XNN_ARCH_WASMSIMD
-
-
-#if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_1X4_ACC2, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_eq_4) {
     for (size_t input_width = 7; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
-        .input_height(2)
-        .kernel_height(5)
-        .kernel_width(5)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
         .subsampling(2)
-        .padding_left(2)
-        .padding_right(2)
-        .padding_top(2)
-        .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2);
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_1X4_ACC2, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_div_4) {
     for (size_t input_width = 16; input_width < 64; input_width += 8) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
-        .input_height(2)
-        .kernel_height(5)
-        .kernel_width(5)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
         .subsampling(2)
-        .padding_left(2)
-        .padding_right(2)
-        .padding_top(2)
-        .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2);
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_1X4_ACC2, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 7; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(8)
-        .input_height(2)
-        .kernel_height(5)
-        .kernel_width(5)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
         .subsampling(2)
-        .padding_left(2)
-        .padding_right(2)
-        .padding_top(2)
-        .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2);
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_1X4_ACC2, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_gt_4) {
     for (size_t input_width = 9; input_width < 17; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
-        .input_height(2)
-        .kernel_height(5)
-        .kernel_width(5)
-        .subsampling(2)
-        .padding_left(2)
-        .padding_right(2)
-        .padding_top(2)
-        .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2);
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_1X4_ACC2, output_height_eq_1) {
-    for (size_t input_height = 1; input_height < 3; input_height++) {
-      for (size_t input_width = 1; input_width < 41; input_width += 7) {
-        DWConv2DMicrokernelTester()
-          .input_width(input_width)
-          .input_height(input_height)
-          .kernel_height(5)
-          .kernel_width(5)
-          .subsampling(2)
-          .padding_left(2)
-          .padding_right(2)
-          .padding_top(2)
-          .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2);
-      }
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_1X4_ACC2, output_height_gt_1) {
-    for (size_t input_height = 3; input_height < 5; input_height++) {
-      for (size_t input_width = 1; input_width < 41; input_width += 7) {
-        DWConv2DMicrokernelTester()
-          .input_width(input_width)
-          .input_height(input_height)
-          .kernel_height(5)
-          .kernel_width(5)
-          .subsampling(2)
-          .padding_left(2)
-          .padding_right(2)
-          .padding_top(2)
-          .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2);
-      }
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_1X4_ACC2, padding_top_eq_1) {
-    for (size_t input_height = 2; input_height < 8; input_height++) {
-      for (size_t input_width = 1; input_width < 41; input_width += 7) {
-        DWConv2DMicrokernelTester()
-          .input_width(input_width)
-          .input_height(input_height)
-          .kernel_height(5)
-          .kernel_width(5)
-          .subsampling(2)
-          .padding_left(2)
-          .padding_right(2)
-          .padding_top(1)
-          .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2);
-      }
-    }
-  }
-#endif  // XNN_ARCH_WASMSIMD
-
-
-#if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_1X4_ACC3, output_width_eq_4) {
-    for (size_t input_width = 7; input_width < 9; input_width++) {
-      DWConv2DMicrokernelTester()
-        .input_width(input_width)
-        .input_height(2)
+        .input_height(4)
         .kernel_height(3)
         .kernel_width(3)
         .subsampling(2)
@@ -19197,77 +21097,11 @@
         .padding_right(1)
         .padding_top(1)
         .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_1X4_ACC3, output_width_div_4) {
-    for (size_t input_width = 16; input_width < 64; input_width += 8) {
-      DWConv2DMicrokernelTester()
-        .input_width(input_width)
-        .input_height(2)
-        .kernel_height(3)
-        .kernel_width(3)
-        .subsampling(2)
-        .padding_left(1)
-        .padding_right(1)
-        .padding_top(1)
-        .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3);
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_1X4_ACC3, output_width_lt_4) {
-    for (size_t input_width = 1; input_width < 7; input_width++) {
-      DWConv2DMicrokernelTester()
-        .input_width(8)
-        .input_height(2)
-        .kernel_height(3)
-        .kernel_width(3)
-        .subsampling(2)
-        .padding_left(1)
-        .padding_right(1)
-        .padding_top(1)
-        .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3);
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_1X4_ACC3, output_width_gt_4) {
-    for (size_t input_width = 9; input_width < 17; input_width++) {
-      DWConv2DMicrokernelTester()
-        .input_width(input_width)
-        .input_height(2)
-        .kernel_height(3)
-        .kernel_width(3)
-        .subsampling(2)
-        .padding_left(1)
-        .padding_right(1)
-        .padding_top(1)
-        .padding_bottom(1)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3);
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_1X4_ACC3, output_height_eq_1) {
-    for (size_t input_height = 1; input_height < 3; input_height++) {
-      for (size_t input_width = 1; input_width < 41; input_width += 7) {
-        DWConv2DMicrokernelTester()
-          .input_width(input_width)
-          .input_height(input_height)
-          .kernel_height(3)
-          .kernel_width(3)
-          .subsampling(2)
-          .padding_left(1)
-          .padding_right(1)
-          .padding_top(1)
-          .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3);
-      }
-    }
-  }
-
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_1X4_ACC3, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_eq_2) {
     for (size_t input_height = 3; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 41; input_width += 7) {
         DWConv2DMicrokernelTester()
@@ -19280,12 +21114,501 @@
           .padding_right(1)
           .padding_top(1)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_1X4_ACC3, padding_top_eq_1) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_3X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_eq_4) {
+    for (size_t input_height = 7; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 16; input_height < 64; input_height += 8) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 9; input_height < 17; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_4X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 26; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
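The loop bounds in these generated cases encode the usual convolution output-size arithmetic. A hedged, illustrative sketch follows (it is not part of the patch, and the helper name is hypothetical rather than anything provided by DWConv2DMicrokernelTester): with a 3x3 kernel, stride 2 and left/right padding of 1, output_width = (input_width + 2 - 3) / 2 + 1, so input widths 7..8 give exactly 4 output columns (output_width_eq_4) and input widths 1..6 give fewer than 4 (output_width_lt_4).

#include <cassert>
#include <cstddef>

// Hypothetical helper illustrating the output-size formula assumed above.
static size_t dwconv2d_output_dim(size_t input, size_t pad_begin, size_t pad_end,
                                  size_t kernel, size_t subsampling) {
  return (input + pad_begin + pad_end - kernel) / subsampling + 1;
}

int main() {
  // Input widths 7 and 8 map to an output width of exactly 4 (output_width_eq_4).
  for (size_t input_width = 7; input_width < 9; input_width++) {
    assert(dwconv2d_output_dim(input_width, 1, 1, 3, 2) == 4);
  }
  // Input widths 1..6 map to output widths 1..3 (output_width_lt_4).
  for (size_t input_width = 1; input_width < 7; input_width++) {
    assert(dwconv2d_output_dim(input_width, 1, 1, 3, 2) < 4);
  }
  return 0;
}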
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, padding_top_eq_1) {
     for (size_t input_height = 2; input_height < 8; input_height++) {
       for (size_t input_width = 1; input_width < 41; input_width += 7) {
         DWConv2DMicrokernelTester()
@@ -19298,7 +21621,7 @@
           .padding_right(1)
           .padding_top(0)
           .padding_bottom(1)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2);
       }
     }
   }
@@ -19306,7 +21629,4514 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_3X4, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
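The row count in each kernel name (1x4, 2x4, 3x4, 4x4) is the number of output rows handled per pass, and the output_height_* loop bounds follow from the same formula applied vertically. A hedged sketch under that assumption (the helper name is illustrative, not an XNNPACK API):

#include <cassert>
#include <cstddef>

// Hypothetical helper: vertical analogue of the output-size formula.
static size_t chw_output_height(size_t input_height, size_t padding_top,
                                size_t padding_bottom, size_t kernel_height,
                                size_t subsampling) {
  return (input_height + padding_top + padding_bottom - kernel_height) / subsampling + 1;
}

int main() {
  // For a 2-row tile: input heights 3..4 give exactly 2 output rows (output_height_eq_2).
  for (size_t input_height = 3; input_height < 5; input_height++) {
    assert(chw_output_height(input_height, 1, 1, 3, 2) == 2);
  }
  // Input heights 8, 12, ..., 28 give output heights that are multiples of 2 (output_height_div_2).
  for (size_t input_height = 8; input_height < 32; input_height += 4) {
    assert(chw_output_height(input_height, 1, 1, 3, 2) % 2 == 0);
  }
  return 0;
}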
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_3X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, output_height_eq_4) {
+    for (size_t input_height = 7; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 16; input_height < 64; input_height += 8) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 9; input_height < 17; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_4X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 26; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
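The final case in each block drops the top padding to 0, which removes one padded row: the smallest input height for which the vertical output-size expression stays valid is then 2, and that is where those loops start. A minimal sketch of that bound, assuming the same formula as in the sketches above:

#include <cassert>
#include <cstddef>

int main() {
  const size_t kernel_height = 3;
  const size_t subsampling = 2;
  const size_t padding_top = 0;     // the value these cases pass
  const size_t padding_bottom = 1;
  for (size_t input_height = 2; input_height < 26; input_height++) {
    const size_t padded = input_height + padding_top + padding_bottom;
    // With input_height == 1 the padded extent (2) would be smaller than the
    // kernel height, so the subtraction below would underflow; hence the loops
    // above start at input_height = 2.
    assert(padded >= kernel_height);
    const size_t output_height = (padded - kernel_height) / subsampling + 1;
    assert(output_height >= 1);
  }
  return 0;
}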
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_3X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, output_height_eq_4) {
+    for (size_t input_height = 7; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 16; input_height < 64; input_height += 8) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 9; input_height < 17; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_4X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 26; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_1X4_ACC4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_ARM_SPLAT_2X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_3X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(8)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, output_height_eq_4) {
+    for (size_t input_height = 7; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 16; input_height < 64; input_height += 8) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 9; input_height < 17; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_4X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 26; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_1X4_ACC4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(3)
+        .kernel_width(3)
+        .subsampling(2)
+        .padding_left(1)
+        .padding_right(1)
+        .padding_top(1)
+        .padding_bottom(1)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(1)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_3X3S2P1__WASMSIMD_X86_SPLAT_2X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(3)
+          .kernel_width(3)
+          .subsampling(2)
+          .padding_left(1)
+          .padding_right(1)
+          .padding_top(0)
+          .padding_bottom(1)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_width_eq_4) {
     DWConv2DMicrokernelTester()
       .input_width(4)
       .input_height(3)
@@ -19317,10 +26147,10 @@
       .padding_right(2)
       .padding_top(2)
       .padding_bottom(2)
-      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4);
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2);
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_3X4, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_width_div_4) {
     for (size_t input_width = 8; input_width < 32; input_width += 4) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -19332,11 +26162,11 @@
         .padding_right(2)
         .padding_top(2)
         .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_3X4, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 4; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(4)
@@ -19348,11 +26178,11 @@
         .padding_right(2)
         .padding_top(2)
         .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_3X4, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_width_gt_4) {
     for (size_t input_width = 5; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -19364,11 +26194,11 @@
         .padding_right(2)
         .padding_top(2)
         .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_3X4, output_height_div_3) {
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_height_div_3) {
     for (size_t input_height = 6; input_height < 24; input_height += 3) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -19381,12 +26211,12 @@
           .padding_right(2)
           .padding_top(2)
           .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_3X4, output_height_lt_3) {
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_height_lt_3) {
     for (size_t input_height = 1; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -19399,12 +26229,12 @@
           .padding_right(2)
           .padding_top(2)
           .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_3X4, output_height_gt_3) {
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_height_gt_3) {
     for (size_t input_height = 4; input_height < 7; input_height++) {
       for (size_t input_width = 1; input_width < 21; input_width += 3) {
         DWConv2DMicrokernelTester()
@@ -19417,7 +26247,7 @@
           .padding_right(2)
           .padding_top(2)
           .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2);
       }
     }
   }
@@ -19425,7 +26255,4584 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_1X4_ACC2, output_width_eq_4) {
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4_ACC2, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4_ACC2, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4_ACC2, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
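+  // Same 5x5p2 CHW kernel family, ARM-tuned loadsplat variant with a taller 5-row by
+  // 4-column output tile; the height sweeps below therefore pivot around 5 instead of 4.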
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_5X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(5)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_5X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_5X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_5X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_5X4, output_height_div_5) {
+    for (size_t input_height = 10; input_height < 40; input_height += 5) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_5X4, output_height_lt_5) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_LOADSPLAT_5X4, output_height_gt_5) {
+    for (size_t input_height = 6; input_height < 11; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
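+  // The groups from here on exercise the x86-tuned WASM SIMD loadsplat variants. The
+  // _ACCn suffix in the kernel name denotes the number of separate accumulators used
+  // before the final reduction; the test sweeps are identical across accumulator counts.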
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4_ACC2, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4_ACC2, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4_ACC2, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_5X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(5)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_5X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_5X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_5X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_5X4, output_height_div_5) {
+    for (size_t input_height = 10; input_height < 40; input_height += 5) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_5X4, output_height_lt_5) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_LOADSPLAT_5X4, output_height_gt_5) {
+    for (size_t input_height = 6; input_height < 11; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
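+  // The following groups cover the "splat" variants of the same 5x5p2 kernel, which
+  // differ from the "loadsplat" variants only in how the kernel weights are broadcast
+  // into SIMD registers; the test dimensions and sweeps are unchanged.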
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
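+// From the 2x4 variants onward, each kernel produces several output rows per pass. In addition
+// to the width cases above, every row tile R is therefore exercised with input heights that are
+// a multiple of R (output_height_div_R), smaller than R (output_height_lt_R), and larger than R
+// (output_height_gt_R).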
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4_ACC2, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4_ACC2, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4_ACC2, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_5X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(5)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_5X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_5X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_5X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_5X4, output_height_div_5) {
+    for (size_t input_height = 10; input_height < 40; input_height += 5) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_5X4, output_height_lt_5) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_ARM_SPLAT_5X4, output_height_gt_5) {
+    for (size_t input_height = 6; input_height < 11; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
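+// The *_x86_splat variants below mirror the *_arm_splat tests above with identical geometry;
+// as the kernel names suggest, the two families presumably differ only in how output clamping
+// is lowered to WAsm SIMD (a property of the kernels, not of these tests), so the test cases
+// themselves are unchanged.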
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(1)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(1)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 2; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(2)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 4; input_height < 16; input_height += 2) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 2; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(3)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(3)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 6; input_height < 24; input_height += 3) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 4; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4_ACC2, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4_ACC2, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4_ACC2, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4_ACC2, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(4)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4, output_height_div_4) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4, output_height_lt_4) {
+    for (size_t input_height = 1; input_height < 4; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_4X4, output_height_gt_4) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_5X4, output_width_eq_4) {
+    DWConv2DMicrokernelTester()
+      .input_width(4)
+      .input_height(5)
+      .kernel_height(5)
+      .kernel_width(5)
+      .subsampling(1)
+      .padding_left(2)
+      .padding_right(2)
+      .padding_top(2)
+      .padding_bottom(2)
+      .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4);
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_5X4, output_width_div_4) {
+    for (size_t input_width = 8; input_width < 32; input_width += 4) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_5X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 4; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(4)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_5X4, output_width_gt_4) {
+    for (size_t input_width = 5; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(5)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(1)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_5X4, output_height_div_5) {
+    for (size_t input_height = 10; input_height < 40; input_height += 5) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_5X4, output_height_lt_5) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5P2__WASMSIMD_X86_SPLAT_5X4, output_height_gt_5) {
+    for (size_t input_height = 6; input_height < 11; input_height++) {
+      for (size_t input_width = 1; input_width < 21; input_width += 3) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(1)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_eq_4) {
     for (size_t input_width = 7; input_width < 9; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -19437,11 +30844,11 @@
         .padding_right(2)
         .padding_top(2)
         .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_1X4_ACC2, output_width_div_4) {
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_div_4) {
     for (size_t input_width = 16; input_width < 64; input_width += 8) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -19453,11 +30860,11 @@
         .padding_right(2)
         .padding_top(2)
         .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_1X4_ACC2, output_width_lt_4) {
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_lt_4) {
     for (size_t input_width = 1; input_width < 7; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(8)
@@ -19469,11 +30876,11 @@
         .padding_right(2)
         .padding_top(2)
         .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_1X4_ACC2, output_width_gt_4) {
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_width_gt_4) {
     for (size_t input_width = 9; input_width < 17; input_width++) {
       DWConv2DMicrokernelTester()
         .input_width(input_width)
@@ -19485,11 +30892,11 @@
         .padding_right(2)
         .padding_top(2)
         .padding_bottom(2)
-        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2);
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4);
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_1X4_ACC2, output_height_eq_1) {
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_height_eq_1) {
     for (size_t input_height = 1; input_height < 3; input_height++) {
       for (size_t input_width = 1; input_width < 41; input_width += 7) {
         DWConv2DMicrokernelTester()
@@ -19502,12 +30909,12 @@
           .padding_right(2)
           .padding_top(2)
           .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_1X4_ACC2, output_height_gt_1) {
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4, output_height_gt_1) {
     for (size_t input_height = 3; input_height < 5; input_height++) {
       for (size_t input_width = 1; input_width < 41; input_width += 7) {
         DWConv2DMicrokernelTester()
@@ -19520,12 +30927,12 @@
           .padding_right(2)
           .padding_top(2)
           .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4);
       }
     }
   }
 
-  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_1X4_ACC2, padding_top_eq_1) {
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4, padding_top_eq_1) {
     for (size_t input_height = 2; input_height < 8; input_height++) {
       for (size_t input_width = 1; input_width < 41; input_width += 7) {
         DWConv2DMicrokernelTester()
@@ -19538,7 +30945,5446 @@
           .padding_right(2)
           .padding_top(1)
           .padding_bottom(2)
-          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2);
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_1X4_ACC5, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_2X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_LOADSPLAT_3X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_1X4_ACC5, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_2X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_LOADSPLAT_3X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_1X4_ACC5, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_2X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_ARM_SPLAT_3X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC2, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC3, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC4, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC4, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(2)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_height_eq_1) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC5, output_height_gt_1) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_1X4_ACC5, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 8; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(4)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_height_eq_2) {
+    for (size_t input_height = 3; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_height_div_2) {
+    for (size_t input_height = 8; input_height < 32; input_height += 4) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_height_lt_2) {
+    for (size_t input_height = 1; input_height < 3; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, output_height_gt_2) {
+    for (size_t input_height = 5; input_height < 9; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_2X4_ACC3, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 14; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_width_eq_4) {
+    for (size_t input_width = 7; input_width < 9; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_width_div_4) {
+    for (size_t input_width = 16; input_width < 64; input_width += 8) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_width_lt_4) {
+    for (size_t input_width = 1; input_width < 7; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(8)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_width_gt_4) {
+    for (size_t input_width = 9; input_width < 17; input_width++) {
+      DWConv2DMicrokernelTester()
+        .input_width(input_width)
+        .input_height(6)
+        .kernel_height(5)
+        .kernel_width(5)
+        .subsampling(2)
+        .padding_left(2)
+        .padding_right(2)
+        .padding_top(2)
+        .padding_bottom(2)
+        .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_height_eq_3) {
+    for (size_t input_height = 5; input_height < 7; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_height_div_3) {
+    for (size_t input_height = 12; input_height < 48; input_height += 6) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_height_lt_3) {
+    for (size_t input_height = 1; input_height < 5; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, output_height_gt_3) {
+    for (size_t input_height = 7; input_height < 13; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(2)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
+      }
+    }
+  }
+
+  TEST(F32_DWCONV2D_CHW_5X5S2P2__WASMSIMD_X86_SPLAT_3X4_ACC2, padding_top_eq_1) {
+    for (size_t input_height = 2; input_height < 20; input_height++) {
+      for (size_t input_width = 1; input_width < 41; input_width += 7) {
+        DWConv2DMicrokernelTester()
+          .input_width(input_width)
+          .input_height(input_height)
+          .kernel_height(5)
+          .kernel_width(5)
+          .subsampling(2)
+          .padding_left(2)
+          .padding_right(2)
+          .padding_top(1)
+          .padding_bottom(2)
+          .Test(xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2);
       }
     }
   }
diff --git a/test/f32-dwconv2d-chw.yaml b/test/f32-dwconv2d-chw.yaml
index 60a2bfb..ff2efc9 100644
--- a/test/f32-dwconv2d-chw.yaml
+++ b/test/f32-dwconv2d-chw.yaml
@@ -217,32 +217,170 @@
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc3
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc4
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_3x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_4x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_5x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_6x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc2
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc3
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_1x4_acc4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_2x4_acc2
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_3x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_4x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_5x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_6x4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc2
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc3
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_1x4_acc4
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_2x4_acc2
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_1x4_acc3
-- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_3x4
-- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_1x4_acc2
-- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_1x4_acc3
-- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_3x4
-- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_2x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_3x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_3x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_2x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_3x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc2
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_2x4_acc3
+- name: xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_3x4_acc2
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_1x1
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_2x1
 - name: xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_3x1
diff --git a/test/f32-gemm-minmax.cc b/test/f32-gemm-minmax.cc
index 17f750f..4d41ab6 100644
--- a/test/f32-gemm-minmax.cc
+++ b/test/f32-gemm-minmax.cc
@@ -28154,6 +28154,360 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .a_stride(3)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(1)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, k_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, k_gt_1_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, k_gt_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(7)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(7)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_LOAD1, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_load1);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMM_MINMAX_4X8__SSE_LOAD1, k_eq_1) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -28508,6 +28862,360 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .a_stride(3)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(1)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, k_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, k_gt_1_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, k_gt_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(7)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(7)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_LOAD1, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_load1);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMM_MINMAX_1X8__SSE_DUP, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -28964,6 +29672,462 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMM_MINMAX_4X8__SSE_DUP, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -29420,6 +30584,462 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMM_MINMAX_1X8S4__SSE, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -29876,6 +31496,462 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8S4__SSE, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8s4__sse);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMM_MINMAX_4X8S4__SSE, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -30332,6 +32408,462 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8S4__SSE, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8s4__sse);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMM_MINMAX_4X2C4__SSE, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -30788,6 +33320,1830 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_1X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_3X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_4X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMM_MINMAX_5X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMM_MINMAX_1X8__AVX_BROADCAST, k_eq_1) {
     TEST_REQUIRES_X86_AVX;
     GemmMicrokernelTester()
@@ -41462,7 +45818,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41471,10 +45827,10 @@
       .m(1)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41484,10 +45840,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41497,10 +45853,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -41512,12 +45868,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -41528,11 +45884,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -41543,11 +45899,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -41557,11 +45913,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -41572,11 +45928,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41589,13 +45945,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41606,12 +45962,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41623,12 +45979,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41640,12 +45996,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -41658,13 +46014,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41675,12 +46031,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41692,12 +46048,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41709,12 +46065,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -41727,13 +46083,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41747,13 +46103,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41763,10 +46119,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41776,10 +46132,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41789,13 +46145,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -41804,10 +46160,10 @@
       .m(3)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -41817,10 +46173,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -41830,10 +46186,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -41845,12 +46201,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41861,11 +46217,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41876,11 +46232,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41890,11 +46246,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41905,11 +46261,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41922,13 +46278,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41939,12 +46295,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41956,12 +46312,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41973,12 +46329,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -41991,13 +46347,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42008,12 +46364,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42025,12 +46381,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42042,12 +46398,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -42060,13 +46416,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42080,13 +46436,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42096,10 +46452,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42109,10 +46465,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42122,13 +46478,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -42137,10 +46493,10 @@
       .m(4)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -42150,10 +46506,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -42163,10 +46519,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -42178,12 +46534,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -42194,11 +46550,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -42209,11 +46565,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -42223,11 +46579,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -42238,11 +46594,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42255,13 +46611,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42272,12 +46628,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42289,12 +46645,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42306,12 +46662,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -42324,13 +46680,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42341,12 +46697,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42358,12 +46714,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42375,12 +46731,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -42393,13 +46749,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42413,13 +46769,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -42429,10 +46785,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -42442,10 +46798,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -42455,13 +46811,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42470,10 +46826,10 @@
       .m(5)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42483,10 +46839,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42496,10 +46852,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -42511,12 +46867,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42527,11 +46883,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42542,11 +46898,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42556,11 +46912,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42571,11 +46927,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42588,13 +46944,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42605,12 +46961,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42622,12 +46978,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42639,12 +46995,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -42657,13 +47013,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42674,12 +47030,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42691,12 +47047,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42708,12 +47064,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -42726,13 +47082,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42746,13 +47102,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42762,10 +47118,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42775,10 +47131,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42788,13 +47144,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42803,10 +47159,10 @@
       .m(6)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42816,10 +47172,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42829,10 +47185,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -42844,12 +47200,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42860,11 +47216,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42875,11 +47231,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42889,11 +47245,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42904,11 +47260,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42921,13 +47277,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42938,12 +47294,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42955,12 +47311,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42972,12 +47328,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -42990,13 +47346,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43007,12 +47363,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43024,12 +47380,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43041,12 +47397,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -43059,13 +47415,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43079,13 +47435,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -43095,10 +47451,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -43108,10 +47464,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -43121,13 +47477,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43136,10 +47492,10 @@
       .m(1)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43149,10 +47505,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43162,10 +47518,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43177,12 +47533,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -43193,11 +47549,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -43208,11 +47564,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -43222,11 +47578,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -43237,11 +47593,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43254,13 +47610,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43271,12 +47627,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43288,12 +47644,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43305,12 +47661,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -43323,13 +47679,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43340,12 +47696,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43357,12 +47713,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43374,12 +47730,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -43392,13 +47748,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43412,13 +47768,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43428,10 +47784,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43441,10 +47797,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43454,13 +47810,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43469,10 +47825,10 @@
       .m(3)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43482,10 +47838,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43495,10 +47851,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43510,12 +47866,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43526,11 +47882,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43541,11 +47897,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43555,11 +47911,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43570,11 +47926,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43587,13 +47943,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43604,12 +47960,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43621,12 +47977,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43638,12 +47994,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -43656,13 +48012,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43673,12 +48029,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43690,12 +48046,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43707,12 +48063,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -43725,13 +48081,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43745,13 +48101,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43761,10 +48117,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43774,10 +48130,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43787,13 +48143,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43802,10 +48158,10 @@
       .m(4)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43815,10 +48171,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43828,10 +48184,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43843,12 +48199,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43859,11 +48215,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43874,11 +48230,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43888,11 +48244,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43903,11 +48259,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43920,13 +48276,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43937,12 +48293,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43954,12 +48310,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43971,12 +48327,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -43989,13 +48345,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44006,12 +48362,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44023,12 +48379,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44040,12 +48396,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -44058,13 +48414,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44078,13 +48434,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -44094,10 +48450,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -44107,10 +48463,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -44120,13 +48476,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44135,10 +48491,10 @@
       .m(5)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44148,10 +48504,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44161,10 +48517,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -44176,12 +48532,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -44192,11 +48548,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -44207,11 +48563,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -44221,11 +48577,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -44236,11 +48592,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44253,13 +48609,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44270,12 +48626,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44287,12 +48643,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44304,12 +48660,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -44322,13 +48678,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44339,12 +48695,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44356,12 +48712,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44373,12 +48729,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -44391,13 +48747,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44411,13 +48767,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44427,10 +48783,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44440,10 +48796,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44453,13 +48809,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44468,10 +48824,10 @@
       .m(6)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44481,10 +48837,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44494,10 +48850,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -44509,12 +48865,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44525,11 +48881,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44540,11 +48896,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44554,11 +48910,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44569,11 +48925,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44586,13 +48942,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44603,12 +48959,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44620,12 +48976,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44637,12 +48993,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -44655,13 +49011,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44672,12 +49028,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44689,12 +49045,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44706,12 +49062,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -44724,13 +49080,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44744,13 +49100,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44760,10 +49116,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44773,10 +49129,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44786,13 +49142,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -44801,10 +49157,10 @@
       .m(1)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -44814,10 +49170,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -44827,10 +49183,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -44842,12 +49198,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44858,11 +49214,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44873,11 +49229,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44887,11 +49243,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44902,11 +49258,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44919,13 +49275,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44935,11 +49291,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44950,11 +49306,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44967,13 +49323,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44983,11 +49339,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44998,11 +49354,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45015,13 +49371,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45032,12 +49388,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45049,12 +49405,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45066,12 +49422,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -45084,13 +49440,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45101,12 +49457,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45118,12 +49474,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45135,12 +49491,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -45153,13 +49509,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45173,13 +49529,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -45189,10 +49545,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -45202,10 +49558,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -45215,13 +49571,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45230,10 +49586,10 @@
       .m(3)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45243,10 +49599,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45256,10 +49612,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -45271,12 +49627,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45287,11 +49643,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45302,11 +49658,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45316,11 +49672,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45331,11 +49687,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45348,13 +49704,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45364,11 +49720,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45379,11 +49735,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45396,13 +49752,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45412,11 +49768,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45427,11 +49783,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45444,13 +49800,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45461,12 +49817,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45478,12 +49834,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45495,12 +49851,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -45513,13 +49869,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45530,12 +49886,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45547,12 +49903,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45564,12 +49920,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -45582,13 +49938,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45602,13 +49958,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45618,10 +49974,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45631,10 +49987,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45644,13 +50000,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -45659,10 +50015,10 @@
       .m(4)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -45672,10 +50028,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -45685,10 +50041,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -45700,12 +50056,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45716,11 +50072,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45731,11 +50087,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45745,11 +50101,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45760,11 +50116,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45777,13 +50133,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45793,11 +50149,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45808,11 +50164,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45825,13 +50181,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45841,11 +50197,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45856,11 +50212,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45873,13 +50229,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45890,12 +50246,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45907,12 +50263,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45924,12 +50280,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -45942,13 +50298,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45959,12 +50315,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45976,12 +50332,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45993,12 +50349,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -46011,13 +50367,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46031,13 +50387,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -46047,10 +50403,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -46060,10 +50416,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -46073,13 +50429,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46088,10 +50444,10 @@
       .m(5)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46101,10 +50457,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46114,10 +50470,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -46129,12 +50485,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46145,11 +50501,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46160,11 +50516,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46174,11 +50530,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46189,11 +50545,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46206,13 +50562,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46222,11 +50578,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46237,11 +50593,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46254,13 +50610,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46270,11 +50626,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46285,11 +50641,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46302,13 +50658,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46319,12 +50675,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46336,12 +50692,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46353,12 +50709,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -46371,13 +50727,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46388,12 +50744,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46405,12 +50761,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46422,12 +50778,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -46440,13 +50796,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46460,13 +50816,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46476,10 +50832,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46489,10 +50845,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46502,13 +50858,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46517,10 +50873,10 @@
       .m(6)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46530,10 +50886,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46543,10 +50899,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -46558,12 +50914,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46574,11 +50930,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46589,11 +50945,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46603,11 +50959,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46618,11 +50974,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46635,13 +50991,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46651,11 +51007,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46666,11 +51022,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46683,13 +51039,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46699,11 +51055,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46714,11 +51070,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46731,13 +51087,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46748,12 +51104,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46765,12 +51121,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46782,12 +51138,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -46800,13 +51156,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46817,12 +51173,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46834,12 +51190,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46851,12 +51207,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -46869,13 +51225,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46889,13 +51245,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46905,10 +51261,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46918,10 +51274,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46931,13 +51287,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -46946,10 +51302,10 @@
       .m(1)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -46959,10 +51315,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -46972,10 +51328,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -46987,12 +51343,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47003,11 +51359,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47018,11 +51374,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47032,11 +51388,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47047,11 +51403,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47064,13 +51420,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47080,11 +51436,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47095,11 +51451,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47112,13 +51468,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47128,11 +51484,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47143,11 +51499,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47160,13 +51516,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47177,12 +51533,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47194,12 +51550,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47211,12 +51567,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -47229,13 +51585,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47246,12 +51602,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47263,12 +51619,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47280,12 +51636,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -47298,13 +51654,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47318,13 +51674,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -47334,10 +51690,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -47347,10 +51703,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -47360,13 +51716,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47375,10 +51731,10 @@
       .m(3)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47388,10 +51744,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47401,10 +51757,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -47416,12 +51772,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47432,11 +51788,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47447,11 +51803,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47461,11 +51817,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47476,11 +51832,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47493,13 +51849,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47509,11 +51865,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47524,11 +51880,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47541,13 +51897,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47557,11 +51913,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47572,11 +51928,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47589,13 +51945,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47606,12 +51962,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47623,12 +51979,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47640,12 +51996,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -47658,13 +52014,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47675,12 +52031,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47692,12 +52048,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47709,12 +52065,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -47727,13 +52083,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47747,13 +52103,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47763,10 +52119,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47776,10 +52132,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47789,13 +52145,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -47804,10 +52160,10 @@
       .m(4)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -47817,10 +52173,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -47830,10 +52186,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -47845,12 +52201,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47861,11 +52217,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47876,11 +52232,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47890,11 +52246,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47905,11 +52261,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47922,13 +52278,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47938,11 +52294,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47953,11 +52309,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47970,13 +52326,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47986,11 +52342,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -48001,11 +52357,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48018,13 +52374,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48035,12 +52391,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48052,12 +52408,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48069,12 +52425,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -48087,13 +52443,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48104,12 +52460,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48121,12 +52477,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48138,12 +52494,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -48156,13 +52512,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48176,13 +52532,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -48192,10 +52548,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -48205,10 +52561,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -48218,13 +52574,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48233,10 +52589,10 @@
       .m(5)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48246,10 +52602,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48259,10 +52615,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -48274,12 +52630,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48290,11 +52646,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48305,11 +52661,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48319,11 +52675,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48334,11 +52690,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48351,13 +52707,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48367,11 +52723,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48382,11 +52738,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48399,13 +52755,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48415,11 +52771,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48430,11 +52786,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48447,13 +52803,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48464,12 +52820,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48481,12 +52837,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48498,12 +52854,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -48516,13 +52872,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48533,12 +52889,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48550,12 +52906,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48567,12 +52923,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -48585,13 +52941,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48605,13 +52961,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48621,10 +52977,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48634,10 +52990,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48647,13 +53003,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -48662,10 +53018,10 @@
       .m(6)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -48675,10 +53031,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -48688,10 +53044,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -48703,12 +53059,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48719,11 +53075,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48734,11 +53090,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48748,11 +53104,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48763,11 +53119,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48780,13 +53136,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48796,11 +53152,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48811,11 +53167,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48828,13 +53184,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48844,11 +53200,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48859,11 +53215,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48876,13 +53232,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48893,12 +53249,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48910,12 +53266,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48927,12 +53283,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -48945,13 +53301,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48962,12 +53318,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48979,12 +53335,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48996,12 +53352,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -49014,13 +53370,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -49034,13 +53390,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -49050,10 +53406,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -49063,10 +53419,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -49076,7 +53432,7 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
diff --git a/test/f32-gemm-minmax.yaml b/test/f32-gemm-minmax.yaml
index e3ea5f7..18a6852 100644
--- a/test/f32-gemm-minmax.yaml
+++ b/test/f32-gemm-minmax.yaml
@@ -182,18 +182,38 @@
   k-block: 4
 - name: xnn_f32_gemm_minmax_ukernel_1x8__sse_load1
   k-block: 1
+- name: xnn_f32_gemm_minmax_ukernel_3x8__sse_load1
+  k-block: 1
 - name: xnn_f32_gemm_minmax_ukernel_4x8__sse_load1
   k-block: 1
+- name: xnn_f32_gemm_minmax_ukernel_5x8__sse_load1
+  k-block: 1
 - name: xnn_f32_gemm_minmax_ukernel_1x8__sse_dup
   k-block: 4
+- name: xnn_f32_gemm_minmax_ukernel_3x8__sse_dup
+  k-block: 4
 - name: xnn_f32_gemm_minmax_ukernel_4x8__sse_dup
   k-block: 4
+- name: xnn_f32_gemm_minmax_ukernel_5x8__sse_dup
+  k-block: 4
 - name: xnn_f32_gemm_minmax_ukernel_1x8s4__sse
   k-block: 4
+- name: xnn_f32_gemm_minmax_ukernel_3x8s4__sse
+  k-block: 4
 - name: xnn_f32_gemm_minmax_ukernel_4x8s4__sse
   k-block: 4
+- name: xnn_f32_gemm_minmax_ukernel_5x8s4__sse
+  k-block: 4
 - name: xnn_f32_gemm_minmax_ukernel_4x2c4__sse
   k-block: 4
+- name: xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup
+  k-block: 4
 - name: xnn_f32_gemm_minmax_ukernel_1x8__avx_broadcast
   k-block: 1
 - name: xnn_f32_gemm_minmax_ukernel_4x8__avx_broadcast
@@ -252,45 +272,45 @@
   k-block: 1
 - name: xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_arm
+- name: xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_arm
+- name: xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_arm
+- name: xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_arm
+- name: xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_arm
+- name: xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_splat_x86
+- name: xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_splat_x86
+- name: xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_splat_x86
+- name: xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_splat_x86
+- name: xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_splat_x86
+- name: xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat
   k-block: 4
 - name: xnn_f32_gemm_minmax_ukernel_1x8s4__wasmsimd_arm
   k-block: 4
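
Editorial note on the YAML hunk above: f32-gemm-minmax.yaml pairs each microkernel with its k-block, the inner-dimension unroll the kernel was generated for, and the test generator sizes the k loops in the .cc files from it. Kernels listed with k-block 1 (the *_load1 variants) get k_eq_1/k_gt_1 cases, while k-block 4 kernels (*_dup, *s4*) get k_eq_4/k_lt_4/k_gt_4/k_div_4 cases whose sweeps run up to ten times the block, as in "for (size_t k = 8; k <= 40; k += 4)" in the test diffs. The helper below only mirrors that observed k_div sweep; KDivSweep is a hypothetical name and is not part of XNNPACK's test generator, and the exact bounds of the other k cases differ.

  // Illustrative only: the k values a generated "k_div" test sweeps for a
  // given k-block, matching loops such as "for (size_t k = 8; k <= 40; k += 4)".
  #include <cstddef>
  #include <vector>

  std::vector<size_t> KDivSweep(size_t k_block) {
    std::vector<size_t> ks;
    for (size_t k = 2 * k_block; k <= 10 * k_block; k += k_block) {
      ks.push_back(k);  // k-block 4 -> 8, 12, ..., 40, as in the SSE_DUP tests
    }
    return ks;
  }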
diff --git a/test/f32-gemminc-minmax.cc b/test/f32-gemminc-minmax.cc
index b2840f1..f9d4711 100644
--- a/test/f32-gemminc-minmax.cc
+++ b/test/f32-gemminc-minmax.cc
@@ -23807,6 +23807,360 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, k_eq_1) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, k_eq_1_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .a_stride(3)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(1)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, k_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, k_gt_1_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, k_gt_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(7)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(7)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_LOAD1, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
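
Editorial note on the strided cases in the block above: a_stride is the element stride between consecutive rows of A, cm_stride the stride between rows of the output C, and cn_stride the stride between successive NR-wide column blocks of C. The tests deliberately pass values larger than k or n (7, 11, 23, 43) so a kernel that wrongly assumes densely packed rows would fail. The helpers below are a minimal sketch of that addressing; a_row and c_elem are hypothetical names, strides are in elements here, and the real microkernels take byte strides.

  // Illustrative only: the strided addressing the *_strided_* tests exercise.
  #include <cstddef>

  inline const float* a_row(const float* a, size_t i, size_t a_stride) {
    return a + i * a_stride;        // row i of A; requires a_stride >= k
  }

  inline float* c_elem(float* c, size_t i, size_t j, size_t cm_stride) {
    return c + i * cm_stride + j;   // C[i][j]; requires cm_stride >= n
  }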
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMMINC_MINMAX_4X8__SSE_LOAD1, k_eq_1) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -24161,6 +24515,360 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, k_eq_1) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, k_eq_1_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .a_stride(3)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(1)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, k_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, k_gt_1_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, k_gt_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(7)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(7)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_LOAD1, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMMINC_MINMAX_1X8__SSE_DUP, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -24617,6 +25325,462 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMMINC_MINMAX_4X8__SSE_DUP, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -25073,6 +26237,462 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
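
Editorial note on the *_subtile cases in the blocks above: they sweep every m <= mr and n <= nr so the kernel's edge handling for partial tiles is covered, and they set iterations(1) to keep the full (m, n) sweep cheap. The sketch below just enumerates that coverage; SubtileCases is a hypothetical helper, not part of the test harness.

  // Illustrative only: the (m, n) partial-tile cases a *_subtile test covers
  // for an mr x nr kernel, e.g. 5 x 8 = 40 combinations for the 5x8 variants.
  #include <cstddef>
  #include <utility>
  #include <vector>

  std::vector<std::pair<size_t, size_t>> SubtileCases(size_t mr, size_t nr) {
    std::vector<std::pair<size_t, size_t>> cases;
    for (size_t m = 1; m <= mr; m++) {
      for (size_t n = 1; n <= nr; n++) {
        cases.emplace_back(m, n);  // every partial tile up to the full mr x nr tile
      }
    }
    return cases;
  }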
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMMINC_MINMAX_1X8S4__SSE, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -25529,6 +27149,462 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8S4__SSE, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8s4__sse);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
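
Editorial note on the suite names: they encode the tester parameters directly. F32_GEMMINC_MINMAX_3X8S4__SSE is the f32 GEMM-with-initial-accumulator (gemminc) kernel with min/max clamping, a 3x8 register tile, and the s4 variant, which is exercised with .sr(4); the SSE_LOAD1 and SSE_DUP variants keep .sr(1) and, as their suffixes suggest, differ only in how elements of A are broadcast. The constants below simply restate the parameter values used in the tests above; TileConfig and the variable names are hypothetical, introduced here for illustration.

  // Illustrative only: how the tester parameters track the kernel-name suffix.
  //   3X8S4__SSE   -> .mr(3).nr(8).kr(1).sr(4)
  //   5X8__SSE_DUP -> .mr(5).nr(8).kr(1).sr(1)
  #include <cstddef>

  struct TileConfig { size_t mr, nr, kr, sr; };

  constexpr TileConfig kGemminc3x8s4Sse  = {3, 8, 1, 4};
  constexpr TileConfig kGemminc5x8SseDup = {5, 8, 1, 1};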
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMMINC_MINMAX_4X8S4__SSE, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -25985,6 +28061,2286 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8S4__SSE, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8s4__sse);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_1X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_3X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_4X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_eq_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .a_stride(7)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_lt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_gt_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_div_4_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .a_stride(43)
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, n_gt_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, n_div_8_strided_a) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .a_stride(23)
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_GEMMINC_MINMAX_5X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_GEMMINC_MINMAX_1X8__AVX_BROADCAST, k_eq_1) {
     TEST_REQUIRES_X86_AVX;
     GemmMicrokernelTester()
@@ -36659,7 +41015,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -36668,10 +41024,10 @@
       .m(1)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -36681,10 +41037,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -36694,10 +41050,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -36709,12 +41065,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -36725,11 +41081,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -36740,11 +41096,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -36754,11 +41110,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -36769,11 +41125,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -36786,13 +41142,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -36803,12 +41159,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -36820,12 +41176,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -36837,12 +41193,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -36855,13 +41211,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -36872,12 +41228,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -36889,12 +41245,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -36906,12 +41262,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -36924,13 +41280,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -36944,13 +41300,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -36960,10 +41316,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -36973,10 +41329,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -36986,13 +41342,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -37001,10 +41357,10 @@
       .m(3)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -37014,10 +41370,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -37027,10 +41383,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -37042,12 +41398,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -37058,11 +41414,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -37073,11 +41429,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -37087,11 +41443,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -37102,11 +41458,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -37119,13 +41475,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37136,12 +41492,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37153,12 +41509,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37170,12 +41526,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -37188,13 +41544,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37205,12 +41561,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37222,12 +41578,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37239,12 +41595,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -37257,13 +41613,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -37277,13 +41633,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -37293,10 +41649,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -37306,10 +41662,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -37319,13 +41675,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -37334,10 +41690,10 @@
       .m(4)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -37347,10 +41703,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -37360,10 +41716,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -37375,12 +41731,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -37391,11 +41747,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -37406,11 +41762,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -37420,11 +41776,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -37435,11 +41791,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -37452,13 +41808,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37469,12 +41825,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37486,12 +41842,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37503,12 +41859,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -37521,13 +41877,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37538,12 +41894,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37555,12 +41911,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37572,12 +41928,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -37590,13 +41946,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -37610,13 +41966,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -37626,10 +41982,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -37639,10 +41995,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -37652,13 +42008,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -37667,10 +42023,10 @@
       .m(5)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -37680,10 +42036,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -37693,10 +42049,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -37708,12 +42064,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -37724,11 +42080,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -37739,11 +42095,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -37753,11 +42109,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -37768,11 +42124,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -37785,13 +42141,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37802,12 +42158,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37819,12 +42175,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37836,12 +42192,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -37854,13 +42210,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37871,12 +42227,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37888,12 +42244,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -37905,12 +42261,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -37923,13 +42279,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -37943,13 +42299,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -37959,10 +42315,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -37972,10 +42328,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -37985,13 +42341,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -38000,10 +42356,10 @@
       .m(6)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -38013,10 +42369,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -38026,10 +42382,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -38041,12 +42397,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -38057,11 +42413,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -38072,11 +42428,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -38086,11 +42442,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -38101,11 +42457,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -38118,13 +42474,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38135,12 +42491,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38152,12 +42508,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38169,12 +42525,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -38187,13 +42543,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38204,12 +42560,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38221,12 +42577,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38238,12 +42594,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -38256,13 +42612,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -38276,13 +42632,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -38292,10 +42648,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -38305,10 +42661,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -38318,13 +42674,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -38333,10 +42689,10 @@
       .m(1)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -38346,10 +42702,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -38359,10 +42715,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -38374,12 +42730,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -38390,11 +42746,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -38405,11 +42761,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -38419,11 +42775,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -38434,11 +42790,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -38451,13 +42807,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38468,12 +42824,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38485,12 +42841,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38502,12 +42858,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -38520,13 +42876,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38537,12 +42893,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38554,12 +42910,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38571,12 +42927,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -38589,13 +42945,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -38609,13 +42965,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -38625,10 +42981,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -38638,10 +42994,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -38651,13 +43007,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -38666,10 +43022,10 @@
       .m(3)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -38679,10 +43035,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -38692,10 +43048,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -38707,12 +43063,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -38723,11 +43079,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -38738,11 +43094,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -38752,11 +43108,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -38767,11 +43123,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -38784,13 +43140,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38801,12 +43157,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38818,12 +43174,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38835,12 +43191,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -38853,13 +43209,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38870,12 +43226,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38887,12 +43243,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -38904,12 +43260,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -38922,13 +43278,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -38942,13 +43298,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -38958,10 +43314,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -38971,10 +43327,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -38984,13 +43340,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -38999,10 +43355,10 @@
       .m(4)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -39012,10 +43368,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -39025,10 +43381,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -39040,12 +43396,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -39056,11 +43412,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -39071,11 +43427,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -39085,11 +43441,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -39100,11 +43456,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -39117,13 +43473,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39134,12 +43490,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39151,12 +43507,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39168,12 +43524,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -39186,13 +43542,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39203,12 +43559,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39220,12 +43576,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39237,12 +43593,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -39255,13 +43611,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -39275,13 +43631,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -39291,10 +43647,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -39304,10 +43660,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -39317,13 +43673,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -39332,10 +43688,10 @@
       .m(5)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -39345,10 +43701,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -39358,10 +43714,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -39373,12 +43729,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -39389,11 +43745,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -39404,11 +43760,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -39418,11 +43774,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -39433,11 +43789,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -39450,13 +43806,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39467,12 +43823,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39484,12 +43840,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39501,12 +43857,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -39519,13 +43875,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39536,12 +43892,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39553,12 +43909,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39570,12 +43926,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -39588,13 +43944,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -39608,13 +43964,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -39624,10 +43980,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -39637,10 +43993,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -39650,13 +44006,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -39665,10 +44021,10 @@
       .m(6)
       .n(8)
       .k(1)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -39678,10 +44034,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -39691,10 +44047,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -39706,12 +44062,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -39722,11 +44078,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -39737,11 +44093,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -39751,11 +44107,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_strided_a) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -39766,11 +44122,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -39783,13 +44139,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39800,12 +44156,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39817,12 +44173,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39834,12 +44190,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -39852,13 +44208,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39869,12 +44225,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39886,12 +44242,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -39903,12 +44259,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -39921,13 +44277,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -39941,13 +44297,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -39957,10 +44313,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -39970,10 +44326,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -39983,13 +44339,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -39998,10 +44354,10 @@
       .m(1)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -40011,10 +44367,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -40024,10 +44380,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -40039,12 +44395,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40055,11 +44411,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40070,11 +44426,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40084,11 +44440,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40099,11 +44455,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40116,13 +44472,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40132,11 +44488,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40147,11 +44503,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40164,13 +44520,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40180,11 +44536,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40195,11 +44551,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40212,13 +44568,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40229,12 +44585,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40246,12 +44602,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40263,12 +44619,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -40281,13 +44637,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40298,12 +44654,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40315,12 +44671,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40332,12 +44688,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -40350,13 +44706,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40370,13 +44726,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -40386,10 +44742,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -40399,10 +44755,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -40412,13 +44768,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -40427,10 +44783,10 @@
       .m(3)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -40440,10 +44796,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -40453,10 +44809,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -40468,12 +44824,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -40484,11 +44840,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -40499,11 +44855,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -40513,11 +44869,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -40528,11 +44884,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40545,13 +44901,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -40561,11 +44917,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -40576,11 +44932,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40593,13 +44949,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -40609,11 +44965,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -40624,11 +44980,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40641,13 +44997,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40658,12 +45014,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40675,12 +45031,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40692,12 +45048,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -40710,13 +45066,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40727,12 +45083,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40744,12 +45100,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -40761,12 +45117,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -40779,13 +45135,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40799,13 +45155,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -40815,10 +45171,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -40828,10 +45184,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -40841,13 +45197,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -40856,10 +45212,10 @@
       .m(4)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -40869,10 +45225,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -40882,10 +45238,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -40897,12 +45253,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -40913,11 +45269,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -40928,11 +45284,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -40942,11 +45298,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -40957,11 +45313,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40974,13 +45330,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -40990,11 +45346,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -41005,11 +45361,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41022,13 +45378,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -41038,11 +45394,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -41053,11 +45409,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41070,13 +45426,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41087,12 +45443,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41104,12 +45460,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41121,12 +45477,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -41139,13 +45495,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41156,12 +45512,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41173,12 +45529,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41190,12 +45546,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -41208,13 +45564,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41228,13 +45584,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -41244,10 +45600,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -41257,10 +45613,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -41270,13 +45626,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -41285,10 +45641,10 @@
       .m(5)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -41298,10 +45654,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -41311,10 +45667,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -41326,12 +45682,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -41342,11 +45698,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -41357,11 +45713,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -41371,11 +45727,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -41386,11 +45742,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41403,13 +45759,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -41419,11 +45775,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -41434,11 +45790,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41451,13 +45807,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -41467,11 +45823,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -41482,11 +45838,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41499,13 +45855,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41516,12 +45872,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41533,12 +45889,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41550,12 +45906,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -41568,13 +45924,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41585,12 +45941,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41602,12 +45958,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41619,12 +45975,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -41637,13 +45993,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41657,13 +46013,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -41673,10 +46029,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -41686,10 +46042,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -41699,13 +46055,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -41714,10 +46070,10 @@
       .m(6)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -41727,10 +46083,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -41740,10 +46096,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -41755,12 +46111,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -41771,11 +46127,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -41786,11 +46142,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -41800,11 +46156,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -41815,11 +46171,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41832,13 +46188,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -41848,11 +46204,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -41863,11 +46219,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41880,13 +46236,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -41896,11 +46252,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -41911,11 +46267,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41928,13 +46284,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41945,12 +46301,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41962,12 +46318,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -41979,12 +46335,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -41997,13 +46353,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42014,12 +46370,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42031,12 +46387,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42048,12 +46404,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -42066,13 +46422,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42086,13 +46442,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42102,10 +46458,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42115,10 +46471,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42128,13 +46484,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -42143,10 +46499,10 @@
       .m(1)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -42156,10 +46512,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -42169,10 +46525,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -42184,12 +46540,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42200,11 +46556,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42215,11 +46571,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42229,11 +46585,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42244,11 +46600,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42261,13 +46617,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42277,11 +46633,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42292,11 +46648,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42309,13 +46665,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42325,11 +46681,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42340,11 +46696,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42357,13 +46713,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42374,12 +46730,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42391,12 +46747,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42408,12 +46764,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -42426,13 +46782,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42443,12 +46799,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42460,12 +46816,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42477,12 +46833,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -42495,13 +46851,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42515,13 +46871,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -42531,10 +46887,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -42544,10 +46900,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -42557,13 +46913,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42572,10 +46928,10 @@
       .m(3)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42585,10 +46941,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42598,10 +46954,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -42613,12 +46969,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -42629,11 +46985,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -42644,11 +47000,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -42658,11 +47014,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -42673,11 +47029,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42690,13 +47046,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -42706,11 +47062,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -42721,11 +47077,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42738,13 +47094,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -42754,11 +47110,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -42769,11 +47125,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42786,13 +47142,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42803,12 +47159,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42820,12 +47176,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42837,12 +47193,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -42855,13 +47211,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42872,12 +47228,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42889,12 +47245,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -42906,12 +47262,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -42924,13 +47280,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42944,13 +47300,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42960,10 +47316,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42973,10 +47329,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -42986,13 +47342,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43001,10 +47357,10 @@
       .m(4)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43014,10 +47370,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43027,10 +47383,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43042,12 +47398,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43058,11 +47414,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43073,11 +47429,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43087,11 +47443,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43102,11 +47458,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43119,13 +47475,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43135,11 +47491,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43150,11 +47506,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43167,13 +47523,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43183,11 +47539,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43198,11 +47554,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43215,13 +47571,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43232,12 +47588,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43249,12 +47605,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43266,12 +47622,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -43284,13 +47640,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43301,12 +47657,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43318,12 +47674,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43335,12 +47691,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -43353,13 +47709,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43373,13 +47729,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43389,10 +47745,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43402,10 +47758,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43415,13 +47771,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -43430,10 +47786,10 @@
       .m(5)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -43443,10 +47799,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -43456,10 +47812,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43471,12 +47827,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43487,11 +47843,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43502,11 +47858,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43516,11 +47872,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43531,11 +47887,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43548,13 +47904,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43564,11 +47920,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43579,11 +47935,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43596,13 +47952,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43612,11 +47968,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43627,11 +47983,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43644,13 +48000,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43661,12 +48017,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43678,12 +48034,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43695,12 +48051,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -43713,13 +48069,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43730,12 +48086,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43747,12 +48103,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -43764,12 +48120,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -43782,13 +48138,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43802,13 +48158,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -43818,10 +48174,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -43831,10 +48187,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -43844,13 +48200,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -43859,10 +48215,10 @@
       .m(6)
       .n(8)
       .k(4)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -43872,10 +48228,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_strided_a) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -43885,10 +48241,10 @@
       .n(8)
       .k(4)
       .a_stride(7)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43900,12 +48256,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -43916,11 +48272,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -43931,11 +48287,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -43945,11 +48301,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_lt_4_strided_a) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -43960,11 +48316,11 @@
         .n(8)
         .k(k)
         .a_stride(7)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43977,13 +48333,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -43993,11 +48349,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_gt_4_strided_a) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44008,11 +48364,11 @@
         .n(8)
         .k(k)
         .a_stride(11)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44025,13 +48381,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44041,11 +48397,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_div_4_strided_a) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44056,11 +48412,11 @@
         .n(8)
         .k(k)
         .a_stride(43)
-        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44073,13 +48429,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44090,12 +48446,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44107,12 +48463,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44124,12 +48480,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -44142,13 +48498,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44159,12 +48515,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44176,12 +48532,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44193,12 +48549,12 @@
           .n(n)
           .k(k)
           .a_stride(23)
-          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -44211,13 +48567,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44231,13 +48587,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44247,10 +48603,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44260,10 +48616,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_GEMMINC_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44273,7 +48629,7 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
diff --git a/test/f32-gemminc-minmax.yaml b/test/f32-gemminc-minmax.yaml
index 4f13888..e7cc443 100644
--- a/test/f32-gemminc-minmax.yaml
+++ b/test/f32-gemminc-minmax.yaml
@@ -150,16 +150,36 @@
   k-block: 4
 - name: xnn_f32_gemminc_minmax_ukernel_1x8__sse_load1
   k-block: 1
+- name: xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1
+  k-block: 1
 - name: xnn_f32_gemminc_minmax_ukernel_4x8__sse_load1
   k-block: 1
+- name: xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1
+  k-block: 1
 - name: xnn_f32_gemminc_minmax_ukernel_1x8__sse_dup
   k-block: 4
+- name: xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup
+  k-block: 4
 - name: xnn_f32_gemminc_minmax_ukernel_4x8__sse_dup
   k-block: 4
+- name: xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup
+  k-block: 4
 - name: xnn_f32_gemminc_minmax_ukernel_1x8s4__sse
   k-block: 4
+- name: xnn_f32_gemminc_minmax_ukernel_3x8s4__sse
+  k-block: 4
 - name: xnn_f32_gemminc_minmax_ukernel_4x8s4__sse
   k-block: 4
+- name: xnn_f32_gemminc_minmax_ukernel_5x8s4__sse
+  k-block: 4
+- name: xnn_f32_gemminc_minmax_ukernel_1x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup
+  k-block: 4
 - name: xnn_f32_gemminc_minmax_ukernel_1x8__avx_broadcast
   k-block: 1
 - name: xnn_f32_gemminc_minmax_ukernel_4x8__avx_broadcast
@@ -218,45 +238,45 @@
   k-block: 1
 - name: xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_arm
+- name: xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_splat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_splat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_splat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_splat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_splat_x86
+- name: xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat
   k-block: 4
 - name: xnn_f32_gemminc_minmax_ukernel_1x8s4__wasmsimd_arm
   k-block: 4
diff --git a/test/f32-igemm-minmax.cc b/test/f32-igemm-minmax.cc
index ad11149..7b93406 100644
--- a/test/f32-igemm-minmax.cc
+++ b/test/f32-igemm-minmax.cc
@@ -26313,6 +26313,404 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(1)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, k_eq_1_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, k_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, k_gt_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, a_offset) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(17)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, zero) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(17)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_LOAD1, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(1)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_load1);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_IGEMM_MINMAX_4X8__SSE_LOAD1, k_eq_1) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -26711,6 +27109,404 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(1)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, k_eq_1_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(1)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, k_gt_1) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, k_gt_1_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 2; k < 10; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, a_offset) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 5; k += 2) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(29)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, zero) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t mz = 0; mz < 5; mz++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(29)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_LOAD1, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(1)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_load1);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_IGEMM_MINMAX_1X8__SSE_DUP, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -27179,6 +27975,474 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, a_offset) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(67)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, zero) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(67)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_IGEMM_MINMAX_4X8__SSE_DUP, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -27647,6 +28911,474 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, a_offset) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(103)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, zero) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t mz = 0; mz < 5; mz++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(103)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_IGEMM_MINMAX_1X8S4__SSE, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -28115,6 +29847,474 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, a_offset) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(67)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, zero) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(67)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8S4__SSE, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8s4__sse);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_IGEMM_MINMAX_4X8S4__SSE, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -28583,6 +30783,474 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_eq_4) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_lt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_gt_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_div_4) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, n_gt_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, n_div_8) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(4)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, a_offset) {
+    TEST_REQUIRES_X86_SSE;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(4)
+        .m(5)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(103)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, zero) {
+    TEST_REQUIRES_X86_SSE;
+    for (uint32_t mz = 0; mz < 5; mz++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(4)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(103)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, qmin) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, qmax) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8S4__SSE, strided_cm) {
+    TEST_REQUIRES_X86_SSE;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(4)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8s4__sse);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_IGEMM_MINMAX_4X2C4__SSE, k_eq_4) {
     TEST_REQUIRES_X86_SSE;
     GemmMicrokernelTester()
@@ -29051,6 +31719,1878 @@
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, a_offset) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(23)
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, zero) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(23)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_1X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, a_offset) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(67)
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, zero) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(67)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_3X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, a_offset) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, zero) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_4X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cn_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 5; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(4)
+          .iterations(1)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile_m) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t m = 1; m <= 5; m++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_eq_4_subtile_n) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(n)
+        .k(4)
+        .iterations(1)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_lt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k < 4; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_gt_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 5; k < 8; k++) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, k_div_4_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 8; k <= 40; k += 4) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, n_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, n_gt_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, n_gt_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, n_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, n_div_8_strided_cn) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, n_div_8_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        for (uint32_t m = 1; m <= 5; m++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, small_kernel_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, n_gt_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, n_div_8_small_kernel) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, strided_cm_subtile) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      for (uint32_t m = 1; m <= 5; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(5)
+            .nr(8)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+        }
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, a_offset) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t k = 1; k <= 20; k += 5) {
+      GemmMicrokernelTester()
+        .mr(5)
+        .nr(8)
+        .kr(1)
+        .sr(1)
+        .m(5)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(103)
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, zero) {
+    TEST_REQUIRES_X86_SSE2;
+    for (uint32_t mz = 0; mz < 5; mz++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(103)
+          .zero_index(mz)
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, qmin) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmin(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, qmax) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .qmax(128)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+  }
+
+  TEST(F32_IGEMM_MINMAX_5X8__SSE2_DUP, strided_cm) {
+    TEST_REQUIRES_X86_SSE2;
+    GemmMicrokernelTester()
+      .mr(5)
+      .nr(8)
+      .kr(1)
+      .sr(1)
+      .m(5)
+      .n(8)
+      .k(4)
+      .cm_stride(11)
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup);
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_IGEMM_MINMAX_1X8__AVX_BROADCAST, k_eq_1) {
     TEST_REQUIRES_X86_AVX;
     GemmMicrokernelTester()
@@ -40873,7 +45413,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -40882,10 +45422,10 @@
       .m(1)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -40895,10 +45435,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -40910,12 +45450,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40926,11 +45466,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40941,11 +45481,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -40955,11 +45495,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -40972,13 +45512,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -40989,12 +45529,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41006,12 +45546,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -41024,13 +45564,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41041,12 +45581,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41058,12 +45598,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -41076,13 +45616,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(1)
@@ -41093,11 +45633,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41111,13 +45651,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41129,12 +45669,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41146,12 +45686,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41165,13 +45705,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(1)
@@ -41183,11 +45723,11 @@
         .k(k)
         .ks(3)
         .a_offset(7)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 1; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41201,12 +45741,12 @@
           .ks(3)
           .a_offset(7)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41216,10 +45756,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41229,10 +45769,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -41242,13 +45782,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -41257,10 +45797,10 @@
       .m(3)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -41270,10 +45810,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -41285,12 +45825,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41301,11 +45841,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41316,11 +45856,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41330,11 +45870,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41347,13 +45887,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41364,12 +45904,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41381,12 +45921,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -41399,13 +45939,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41416,12 +45956,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41433,12 +45973,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -41451,13 +45991,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41468,11 +46008,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41486,13 +46026,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41504,12 +46044,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41521,12 +46061,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41540,13 +46080,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(3)
@@ -41558,11 +46098,11 @@
         .k(k)
         .ks(3)
         .a_offset(17)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 3; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41576,12 +46116,12 @@
           .ks(3)
           .a_offset(17)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -41591,10 +46131,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -41604,10 +46144,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -41617,13 +46157,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -41632,10 +46172,10 @@
       .m(4)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -41645,10 +46185,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -41660,12 +46200,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -41676,11 +46216,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -41691,11 +46231,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -41705,11 +46245,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41722,13 +46262,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41739,12 +46279,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41756,12 +46296,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -41774,13 +46314,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41791,12 +46331,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41808,12 +46348,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -41826,13 +46366,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(4)
@@ -41843,11 +46383,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41861,13 +46401,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41879,12 +46419,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41896,12 +46436,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -41915,13 +46455,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(4)
@@ -41933,11 +46473,11 @@
         .k(k)
         .ks(3)
         .a_offset(23)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 4; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -41951,12 +46491,12 @@
           .ks(3)
           .a_offset(23)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -41966,10 +46506,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -41979,10 +46519,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -41992,13 +46532,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42007,10 +46547,10 @@
       .m(5)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42020,10 +46560,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -42035,12 +46575,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42051,11 +46591,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42066,11 +46606,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42080,11 +46620,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42097,13 +46637,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42114,12 +46654,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42131,12 +46671,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -42149,13 +46689,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42166,12 +46706,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42183,12 +46723,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -42201,13 +46741,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42218,11 +46758,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42236,13 +46776,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42254,12 +46794,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42271,12 +46811,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42290,13 +46830,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(5)
@@ -42308,11 +46848,11 @@
         .k(k)
         .ks(3)
         .a_offset(29)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 5; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42326,12 +46866,12 @@
           .ks(3)
           .a_offset(29)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42341,10 +46881,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42354,10 +46894,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -42367,13 +46907,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42382,10 +46922,10 @@
       .m(6)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42395,10 +46935,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -42410,12 +46950,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42426,11 +46966,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42441,11 +46981,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42455,11 +46995,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42472,13 +47012,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42489,12 +47029,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42506,12 +47046,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -42524,13 +47064,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42541,12 +47081,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42558,12 +47098,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -42576,13 +47116,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42593,11 +47133,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42611,13 +47151,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42629,12 +47169,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42646,12 +47186,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42665,13 +47205,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(6)
@@ -42683,11 +47223,11 @@
         .k(k)
         .ks(3)
         .a_offset(37)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 6; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42701,12 +47241,12 @@
           .ks(3)
           .a_offset(37)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42716,10 +47256,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42729,10 +47269,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -42742,13 +47282,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -42757,10 +47297,10 @@
       .m(1)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -42770,10 +47310,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -42785,12 +47325,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42801,11 +47341,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42816,11 +47356,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42830,11 +47370,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42847,13 +47387,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42864,12 +47404,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42881,12 +47421,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -42899,13 +47439,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42916,12 +47456,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -42933,12 +47473,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -42951,13 +47491,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(1)
@@ -42968,11 +47508,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -42986,13 +47526,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43004,12 +47544,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43021,12 +47561,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43040,13 +47580,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(1)
@@ -43058,11 +47598,11 @@
         .k(k)
         .ks(3)
         .a_offset(7)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 1; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43076,12 +47616,12 @@
           .ks(3)
           .a_offset(7)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43091,10 +47631,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43104,10 +47644,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -43117,13 +47657,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43132,10 +47672,10 @@
       .m(3)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43145,10 +47685,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43160,12 +47700,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43176,11 +47716,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43191,11 +47731,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43205,11 +47745,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43222,13 +47762,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43239,12 +47779,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43256,12 +47796,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -43274,13 +47814,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43291,12 +47831,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43308,12 +47848,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -43326,13 +47866,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43343,11 +47883,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43361,13 +47901,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43379,12 +47919,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43396,12 +47936,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43415,13 +47955,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(3)
@@ -43433,11 +47973,11 @@
         .k(k)
         .ks(3)
         .a_offset(17)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 3; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43451,12 +47991,12 @@
           .ks(3)
           .a_offset(17)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43466,10 +48006,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43479,10 +48019,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -43492,13 +48032,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43507,10 +48047,10 @@
       .m(4)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43520,10 +48060,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43535,12 +48075,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43551,11 +48091,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43566,11 +48106,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43580,11 +48120,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43597,13 +48137,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43614,12 +48154,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43631,12 +48171,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -43649,13 +48189,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43666,12 +48206,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43683,12 +48223,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -43701,13 +48241,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43718,11 +48258,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43736,13 +48276,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43754,12 +48294,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43771,12 +48311,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43790,13 +48330,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(4)
@@ -43808,11 +48348,11 @@
         .k(k)
         .ks(3)
         .a_offset(23)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 4; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43826,12 +48366,12 @@
           .ks(3)
           .a_offset(23)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43841,10 +48381,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43854,10 +48394,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -43867,13 +48407,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -43882,10 +48422,10 @@
       .m(5)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -43895,10 +48435,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -43910,12 +48450,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43926,11 +48466,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43941,11 +48481,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -43955,11 +48495,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -43972,13 +48512,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -43989,12 +48529,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44006,12 +48546,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -44024,13 +48564,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44041,12 +48581,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44058,12 +48598,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -44076,13 +48616,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(5)
@@ -44093,11 +48633,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44111,13 +48651,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44129,12 +48669,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44146,12 +48686,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44165,13 +48705,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(5)
@@ -44183,11 +48723,11 @@
         .k(k)
         .ks(3)
         .a_offset(29)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 5; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44201,12 +48741,12 @@
           .ks(3)
           .a_offset(29)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44216,10 +48756,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44229,10 +48769,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -44242,13 +48782,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44257,10 +48797,10 @@
       .m(6)
       .n(8)
       .k(1)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44270,10 +48810,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -44285,12 +48825,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44301,11 +48841,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44316,11 +48856,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_gt_1) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44330,11 +48870,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44347,13 +48887,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44364,12 +48904,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44381,12 +48921,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -44399,13 +48939,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44416,12 +48956,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44433,12 +48973,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -44451,13 +48991,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, small_kernel) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44468,11 +49008,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44486,13 +49026,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44504,12 +49044,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44521,12 +49061,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44540,13 +49080,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, a_offset) {
     for (size_t k = 1; k <= 5; k += 2) {
       GemmMicrokernelTester()
         .mr(6)
@@ -44558,11 +49098,11 @@
         .k(k)
         .ks(3)
         .a_offset(37)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, zero) {
     for (uint32_t mz = 0; mz < 6; mz++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -44576,12 +49116,12 @@
           .ks(3)
           .a_offset(37)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44591,10 +49131,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44604,10 +49144,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_LOADSPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_LOADSPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -44617,13 +49157,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -44632,10 +49172,10 @@
       .m(1)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -44645,10 +49185,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -44660,12 +49200,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44676,11 +49216,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44691,11 +49231,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44705,11 +49245,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44722,13 +49262,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44738,11 +49278,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44755,13 +49295,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44771,11 +49311,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44788,13 +49328,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44805,12 +49345,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44822,12 +49362,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -44840,13 +49380,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44857,12 +49397,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44874,12 +49414,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -44892,13 +49432,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44909,11 +49449,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44927,13 +49467,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44945,12 +49485,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -44962,12 +49502,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -44981,13 +49521,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(1)
@@ -44999,11 +49539,11 @@
         .k(k)
         .ks(3)
         .a_offset(23)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 1; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45017,12 +49557,12 @@
           .ks(3)
           .a_offset(23)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -45032,10 +49572,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -45045,10 +49585,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -45058,13 +49598,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45073,10 +49613,10 @@
       .m(3)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45086,10 +49626,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -45101,12 +49641,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45117,11 +49657,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45132,11 +49672,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45146,11 +49686,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45163,13 +49703,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45179,11 +49719,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45196,13 +49736,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45212,11 +49752,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45229,13 +49769,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45246,12 +49786,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45263,12 +49803,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -45281,13 +49821,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45298,12 +49838,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45315,12 +49855,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -45333,13 +49873,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45350,11 +49890,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45368,13 +49908,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45386,12 +49926,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45403,12 +49943,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45422,13 +49962,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(3)
@@ -45440,11 +49980,11 @@
         .k(k)
         .ks(3)
         .a_offset(67)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 3; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45458,12 +49998,12 @@
           .ks(3)
           .a_offset(67)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45473,10 +50013,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45486,10 +50026,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -45499,13 +50039,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -45514,10 +50054,10 @@
       .m(4)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -45527,10 +50067,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -45542,12 +50082,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45558,11 +50098,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45573,11 +50113,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45587,11 +50127,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45604,13 +50144,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45620,11 +50160,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45637,13 +50177,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45653,11 +50193,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45670,13 +50210,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45687,12 +50227,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45704,12 +50244,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -45722,13 +50262,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45739,12 +50279,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45756,12 +50296,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -45774,13 +50314,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45791,11 +50331,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45809,13 +50349,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45827,12 +50367,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45844,12 +50384,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -45863,13 +50403,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(4)
@@ -45881,11 +50421,11 @@
         .k(k)
         .ks(3)
         .a_offset(83)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 4; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -45899,12 +50439,12 @@
           .ks(3)
           .a_offset(83)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -45914,10 +50454,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -45927,10 +50467,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -45940,13 +50480,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -45955,10 +50495,10 @@
       .m(5)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -45968,10 +50508,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -45983,12 +50523,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -45999,11 +50539,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46014,11 +50554,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46028,11 +50568,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46045,13 +50585,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46061,11 +50601,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46078,13 +50618,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46094,11 +50634,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46111,13 +50651,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46128,12 +50668,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46145,12 +50685,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -46163,13 +50703,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46180,12 +50720,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46197,12 +50737,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -46215,13 +50755,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46232,11 +50772,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46250,13 +50790,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46268,12 +50808,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46285,12 +50825,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46304,13 +50844,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(5)
@@ -46322,11 +50862,11 @@
         .k(k)
         .ks(3)
         .a_offset(103)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 5; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46340,12 +50880,12 @@
           .ks(3)
           .a_offset(103)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46355,10 +50895,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46368,10 +50908,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -46381,13 +50921,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46396,10 +50936,10 @@
       .m(6)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46409,10 +50949,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -46424,12 +50964,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46440,11 +50980,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46455,11 +50995,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46469,11 +51009,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46486,13 +51026,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46502,11 +51042,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46519,13 +51059,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46535,11 +51075,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46552,13 +51092,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46569,12 +51109,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46586,12 +51126,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -46604,13 +51144,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46621,12 +51161,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46638,12 +51178,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -46656,13 +51196,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46673,11 +51213,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46691,13 +51231,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46709,12 +51249,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46726,12 +51266,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46745,13 +51285,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, a_offset) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(6)
@@ -46763,11 +51303,11 @@
         .k(k)
         .ks(3)
         .a_offset(127)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, zero) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 6; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -46781,12 +51321,12 @@
           .ks(3)
           .a_offset(127)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46796,10 +51336,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46809,10 +51349,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -46822,13 +51362,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -46837,10 +51377,10 @@
       .m(1)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -46850,10 +51390,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 1; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -46865,12 +51405,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 1; m++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -46881,11 +51421,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -46896,11 +51436,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -46910,11 +51450,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46927,13 +51467,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(1)
@@ -46943,11 +51483,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46960,13 +51500,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(1)
@@ -46976,11 +51516,11 @@
         .m(1)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -46993,13 +51533,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47010,12 +51550,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47027,12 +51567,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -47045,13 +51585,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47062,12 +51602,12 @@
           .m(1)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47079,12 +51619,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 1; m++) {
@@ -47097,13 +51637,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47114,11 +51654,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47132,13 +51672,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47150,12 +51690,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47167,12 +51707,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 1; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47186,13 +51726,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(1)
@@ -47204,11 +51744,11 @@
         .k(k)
         .ks(3)
         .a_offset(23)
-        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 1; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47222,12 +51762,12 @@
           .ks(3)
           .a_offset(23)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -47237,10 +51777,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -47250,10 +51790,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_1X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(1)
       .nr(8)
@@ -47263,13 +51803,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47278,10 +51818,10 @@
       .m(3)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47291,10 +51831,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 3; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -47306,12 +51846,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 3; m++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47322,11 +51862,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47337,11 +51877,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47351,11 +51891,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47368,13 +51908,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47384,11 +51924,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47401,13 +51941,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47417,11 +51957,11 @@
         .m(3)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47434,13 +51974,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47451,12 +51991,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47468,12 +52008,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -47486,13 +52026,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47503,12 +52043,12 @@
           .m(3)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47520,12 +52060,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 3; m++) {
@@ -47538,13 +52078,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47555,11 +52095,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47573,13 +52113,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47591,12 +52131,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47608,12 +52148,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 3; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47627,13 +52167,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(3)
@@ -47645,11 +52185,11 @@
         .k(k)
         .ks(3)
         .a_offset(67)
-        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 3; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47663,12 +52203,12 @@
           .ks(3)
           .a_offset(67)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47678,10 +52218,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47691,10 +52231,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_3X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(3)
       .nr(8)
@@ -47704,13 +52244,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -47719,10 +52259,10 @@
       .m(4)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -47732,10 +52272,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -47747,12 +52287,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47763,11 +52303,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47778,11 +52318,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47792,11 +52332,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47809,13 +52349,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47825,11 +52365,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47842,13 +52382,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47858,11 +52398,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -47875,13 +52415,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47892,12 +52432,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47909,12 +52449,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -47927,13 +52467,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47944,12 +52484,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -47961,12 +52501,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -47979,13 +52519,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(4)
@@ -47996,11 +52536,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48014,13 +52554,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48032,12 +52572,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48049,12 +52589,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48068,13 +52608,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(4)
@@ -48086,11 +52626,11 @@
         .k(k)
         .ks(3)
         .a_offset(83)
-        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 4; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48104,12 +52644,12 @@
           .ks(3)
           .a_offset(83)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -48119,10 +52659,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -48132,10 +52672,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -48145,13 +52685,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48160,10 +52700,10 @@
       .m(5)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48173,10 +52713,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 5; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -48188,12 +52728,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 5; m++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48204,11 +52744,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48219,11 +52759,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48233,11 +52773,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48250,13 +52790,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48266,11 +52806,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48283,13 +52823,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48299,11 +52839,11 @@
         .m(5)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48316,13 +52856,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48333,12 +52873,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48350,12 +52890,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -48368,13 +52908,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48385,12 +52925,12 @@
           .m(5)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48402,12 +52942,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 5; m++) {
@@ -48420,13 +52960,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48437,11 +52977,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48455,13 +52995,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48473,12 +53013,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48490,12 +53030,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 5; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48509,13 +53049,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(5)
@@ -48527,11 +53067,11 @@
         .k(k)
         .ks(3)
         .a_offset(103)
-        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 5; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48545,12 +53085,12 @@
           .ks(3)
           .a_offset(103)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48560,10 +53100,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48573,10 +53113,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_5X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(5)
       .nr(8)
@@ -48586,13 +53126,13 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -48601,10 +53141,10 @@
       .m(6)
       .n(8)
       .k(4)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -48614,10 +53154,10 @@
       .n(8)
       .k(4)
       .cn_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile) {
     for (uint32_t m = 1; m <= 6; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -48629,12 +53169,12 @@
           .n(n)
           .k(4)
           .iterations(1)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_m) {
     for (uint32_t m = 1; m <= 6; m++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48645,11 +53185,11 @@
         .n(8)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_eq_4_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48660,11 +53200,11 @@
         .n(n)
         .k(4)
         .iterations(1)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_lt_4) {
     for (size_t k = 1; k < 4; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48674,11 +53214,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_lt_4_subtile) {
     for (size_t k = 1; k < 4; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48691,13 +53231,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_gt_4) {
     for (size_t k = 5; k < 8; k++) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48707,11 +53247,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_gt_4_subtile) {
     for (size_t k = 5; k < 8; k++) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48724,13 +53264,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_div_4) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_div_4) {
     for (size_t k = 8; k <= 40; k += 4) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48740,11 +53280,11 @@
         .m(6)
         .n(8)
         .k(k)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, k_div_4_subtile) {
     for (size_t k = 8; k <= 40; k += 4) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48757,13 +53297,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48774,12 +53314,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48791,12 +53331,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -48809,13 +53349,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48826,12 +53366,12 @@
           .m(6)
           .n(8)
           .k(k)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48843,12 +53383,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         for (uint32_t m = 1; m <= 6; m++) {
@@ -48861,13 +53401,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, small_kernel) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48878,11 +53418,11 @@
         .n(8)
         .k(k)
         .ks(3)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, small_kernel_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, small_kernel_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48896,13 +53436,13 @@
             .k(k)
             .ks(3)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_gt_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_gt_8_small_kernel) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48914,12 +53454,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, n_div_8_small_kernel) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, n_div_8_small_kernel) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48931,12 +53471,12 @@
           .n(8)
           .k(k)
           .ks(3)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 20; k += 5) {
       for (uint32_t m = 1; m <= 6; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -48950,13 +53490,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, a_offset) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, a_offset) {
     for (size_t k = 1; k <= 20; k += 5) {
       GemmMicrokernelTester()
         .mr(6)
@@ -48968,11 +53508,11 @@
         .k(k)
         .ks(3)
         .a_offset(127)
-        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, zero) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, zero) {
     for (uint32_t mz = 0; mz < 6; mz++) {
       for (size_t k = 1; k <= 20; k += 5) {
         GemmMicrokernelTester()
@@ -48986,12 +53526,12 @@
           .ks(3)
           .a_offset(127)
           .zero_index(mz)
-          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -49001,10 +53541,10 @@
       .n(8)
       .k(4)
       .qmin(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -49014,10 +53554,10 @@
       .n(8)
       .k(4)
       .qmax(128)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_IGEMM_MINMAX_6X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(6)
       .nr(8)
@@ -49027,7 +53567,7 @@
       .n(8)
       .k(4)
       .cm_stride(11)
-      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
diff --git a/test/f32-igemm-minmax.yaml b/test/f32-igemm-minmax.yaml
index 2748a9a..fffa71d 100644
--- a/test/f32-igemm-minmax.yaml
+++ b/test/f32-igemm-minmax.yaml
@@ -170,18 +170,38 @@
   k-block: 4
 - name: xnn_f32_igemm_minmax_ukernel_1x8__sse_load1
   k-block: 1
+- name: xnn_f32_igemm_minmax_ukernel_3x8__sse_load1
+  k-block: 1
 - name: xnn_f32_igemm_minmax_ukernel_4x8__sse_load1
   k-block: 1
+- name: xnn_f32_igemm_minmax_ukernel_5x8__sse_load1
+  k-block: 1
 - name: xnn_f32_igemm_minmax_ukernel_1x8__sse_dup
   k-block: 4
+- name: xnn_f32_igemm_minmax_ukernel_3x8__sse_dup
+  k-block: 4
 - name: xnn_f32_igemm_minmax_ukernel_4x8__sse_dup
   k-block: 4
+- name: xnn_f32_igemm_minmax_ukernel_5x8__sse_dup
+  k-block: 4
 - name: xnn_f32_igemm_minmax_ukernel_1x8s4__sse
   k-block: 4
+- name: xnn_f32_igemm_minmax_ukernel_3x8s4__sse
+  k-block: 4
 - name: xnn_f32_igemm_minmax_ukernel_4x8s4__sse
   k-block: 4
+- name: xnn_f32_igemm_minmax_ukernel_5x8s4__sse
+  k-block: 4
 - name: xnn_f32_igemm_minmax_ukernel_4x2c4__sse
   k-block: 4
+- name: xnn_f32_igemm_minmax_ukernel_1x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup
+  k-block: 4
+- name: xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup
+  k-block: 4
 - name: xnn_f32_igemm_minmax_ukernel_1x8__avx_broadcast
   k-block: 1
 - name: xnn_f32_igemm_minmax_ukernel_4x8__avx_broadcast
@@ -240,45 +260,45 @@
   k-block: 1
 - name: xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_arm
+- name: xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_loadsplat_x86
+- name: xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat
   k-block: 1
-- name: xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_arm
+- name: xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_arm
+- name: xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_arm
+- name: xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_arm
+- name: xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_arm
+- name: xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_splat_x86
+- name: xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_splat_x86
+- name: xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_splat_x86
+- name: xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_splat_x86
+- name: xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat
   k-block: 4
-- name: xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_splat_x86
+- name: xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat
   k-block: 4
 - name: xnn_f32_igemm_minmax_ukernel_1x8s4__wasmsimd_arm
   k-block: 4
diff --git a/test/f32-ppmm-minmax.cc b/test/f32-ppmm-minmax.cc
index 3f6c423..24e7751 100644
--- a/test/f32-ppmm-minmax.cc
+++ b/test/f32-ppmm-minmax.cc
@@ -1713,7 +1713,7 @@
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_1) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -1722,10 +1722,10 @@
       .m(4)
       .n(8)
       .k(1)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -1735,10 +1735,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_1_strided_a) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -1748,10 +1748,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_1_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -1763,12 +1763,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_1_subtile_m) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -1779,11 +1779,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_eq_1_subtile_n) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -1794,11 +1794,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_1) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -1808,11 +1808,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, k_gt_1_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -1825,13 +1825,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -1842,12 +1842,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -1859,12 +1859,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -1876,12 +1876,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -1894,13 +1894,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -1911,12 +1911,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -1928,12 +1928,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -1945,12 +1945,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -1963,13 +1963,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -1983,13 +1983,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
         }
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, qmin) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -1999,10 +1999,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, qmax) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -2012,10 +2012,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_ARM_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -2025,13 +2025,13 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
 
 #if XNN_ARCH_WASMSIMD
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_1) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_1) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -2040,10 +2040,10 @@
       .m(4)
       .n(8)
       .k(1)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cn) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cn) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -2053,10 +2053,10 @@
       .n(8)
       .k(1)
       .cn_stride(11)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_1_strided_a) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_1_strided_a) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -2066,10 +2066,10 @@
       .n(8)
       .k(1)
       .a_stride(3)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_1_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_1_subtile) {
     for (uint32_t m = 1; m <= 4; m++) {
       for (uint32_t n = 1; n <= 8; n++) {
         GemmMicrokernelTester()
@@ -2081,12 +2081,12 @@
           .n(n)
           .k(1)
           .iterations(1)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_1_subtile_m) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_1_subtile_m) {
     for (uint32_t m = 1; m <= 4; m++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -2097,11 +2097,11 @@
         .n(8)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_eq_1_subtile_n) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_eq_1_subtile_n) {
     for (uint32_t n = 1; n <= 8; n++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -2112,11 +2112,11 @@
         .n(n)
         .k(1)
         .iterations(1)
-        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_1) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_1) {
     for (size_t k = 2; k < 10; k++) {
       GemmMicrokernelTester()
         .mr(4)
@@ -2126,11 +2126,11 @@
         .m(4)
         .n(8)
         .k(k)
-        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+        .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, k_gt_1_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, k_gt_1_subtile) {
     for (size_t k = 2; k < 10; k++) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -2143,13 +2143,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -2160,12 +2160,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_cn) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -2177,12 +2177,12 @@
           .n(8)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_strided_a) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -2194,12 +2194,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_gt_8_subtile) {
     for (uint32_t n = 9; n < 16; n++) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -2212,13 +2212,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -2229,12 +2229,12 @@
           .m(4)
           .n(8)
           .k(k)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_strided_cn) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -2246,12 +2246,12 @@
           .n(n)
           .k(k)
           .cn_stride(11)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_strided_a) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         GemmMicrokernelTester()
@@ -2263,12 +2263,12 @@
           .n(n)
           .k(k)
           .a_stride(7)
-          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+          .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, n_div_8_subtile) {
     for (uint32_t n = 16; n <= 24; n += 8) {
       for (size_t k = 1; k <= 5; k += 2) {
         for (uint32_t m = 1; m <= 4; m++) {
@@ -2281,13 +2281,13 @@
             .n(n)
             .k(k)
             .iterations(1)
-            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cm_subtile) {
     for (size_t k = 1; k <= 5; k += 2) {
       for (uint32_t m = 1; m <= 4; m++) {
         for (uint32_t n = 1; n <= 8; n++) {
@@ -2301,13 +2301,13 @@
             .k(k)
             .cm_stride(11)
             .iterations(1)
-            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+            .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
         }
       }
     }
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, qmin) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, qmin) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -2317,10 +2317,10 @@
       .n(8)
       .k(1)
       .qmin(128)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, qmax) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, qmax) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -2330,10 +2330,10 @@
       .n(8)
       .k(1)
       .qmax(128)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 
-  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_SPLAT_X86, strided_cm) {
+  TEST(F32_PPMM_MINMAX_4X8__WASMSIMD_X86_SPLAT, strided_cm) {
     GemmMicrokernelTester()
       .mr(4)
       .nr(8)
@@ -2343,7 +2343,7 @@
       .n(8)
       .k(1)
       .cm_stride(11)
-      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86);
+      .Test(xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat);
   }
 #endif  // XNN_ARCH_WASMSIMD
 
diff --git a/test/f32-ppmm-minmax.yaml b/test/f32-ppmm-minmax.yaml
index f3d23bd..89dcbed 100644
--- a/test/f32-ppmm-minmax.yaml
+++ b/test/f32-ppmm-minmax.yaml
@@ -12,9 +12,9 @@
   k-block: 1
 - name: xnn_f32_ppmm_minmax_ukernel_4x8__sse
   k-block: 1
-- name: xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_arm
+- name: xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat
   k-block: 1
-- name: xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_splat_x86
+- name: xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat
   k-block: 1
 - name: xnn_f32_ppmm_minmax_ukernel_4x2__scalar
   k-block: 1
diff --git a/test/f32-sigmoid.cc b/test/f32-sigmoid.cc
index ea33844..4b763b5 100644
--- a/test/f32-sigmoid.cc
+++ b/test/f32-sigmoid.cc
@@ -4247,53 +4247,6 @@
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM || XNN_ARCH_ARM64
-  TEST(F32_SIGMOID__NEON_FRAC_P9_P10_NR1RECPS_X16, batch_eq_16) {
-    TEST_REQUIRES_ARM_NEON;
-    VUnOpMicrokernelTester()
-      .batch_size(16)
-      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x16), VUnOpMicrokernelTester::OpType::Sigmoid);
-  }
-
-  TEST(F32_SIGMOID__NEON_FRAC_P9_P10_NR1RECPS_X16, batch_div_16) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
-      VUnOpMicrokernelTester()
-        .batch_size(batch_size)
-        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x16), VUnOpMicrokernelTester::OpType::Sigmoid);
-    }
-  }
-
-  TEST(F32_SIGMOID__NEON_FRAC_P9_P10_NR1RECPS_X16, batch_lt_16) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
-      VUnOpMicrokernelTester()
-        .batch_size(batch_size)
-        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x16), VUnOpMicrokernelTester::OpType::Sigmoid);
-    }
-  }
-
-  TEST(F32_SIGMOID__NEON_FRAC_P9_P10_NR1RECPS_X16, batch_gt_16) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
-      VUnOpMicrokernelTester()
-        .batch_size(batch_size)
-        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x16), VUnOpMicrokernelTester::OpType::Sigmoid);
-    }
-  }
-
-  TEST(F32_SIGMOID__NEON_FRAC_P9_P10_NR1RECPS_X16, inplace) {
-    TEST_REQUIRES_ARM_NEON;
-    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
-      VUnOpMicrokernelTester()
-        .batch_size(batch_size)
-        .inplace(true)
-        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x16), VUnOpMicrokernelTester::OpType::Sigmoid);
-    }
-  }
-#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
-
-
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
   TEST(F32_SIGMOID__SSE2_P5_DIV_X4, batch_eq_4) {
     TEST_REQUIRES_X86_SSE2;
diff --git a/test/f32-sigmoid.yaml b/test/f32-sigmoid.yaml
index f047299..727285e 100644
--- a/test/f32-sigmoid.yaml
+++ b/test/f32-sigmoid.yaml
@@ -128,7 +128,6 @@
 - name: xnn_f32_sigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x16
 - name: xnn_f32_sigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x20
 - name: xnn_f32_sigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x24
-- name: xnn_f32_sigmoid_ukernel__neon_frac_p9_p10_nr1recps_x16
 - name: xnn_f32_sigmoid_ukernel__sse2_p5_div_x4
 - name: xnn_f32_sigmoid_ukernel__sse2_p5_div_x8
 - name: xnn_f32_sigmoid_ukernel__sse2_p5_div_x12
diff --git a/test/f32-spmm-minmax.cc b/test/f32-spmm-minmax.cc
index eccf471..451b6c3 100644
--- a/test/f32-spmm-minmax.cc
+++ b/test/f32-spmm-minmax.cc
@@ -17,7 +17,2485 @@
 #include "spmm-microkernel-tester.h"
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_4X1__NEON, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(4)
+      .nr(1)
+      .m(4)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(4)
+        .nr(1)
+        .m(4)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, m_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 4; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, m_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 8; m <= 12; m += 4) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, m_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 5; m < 8; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .output_stride(11)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(4)
+      .nr(1)
+      .m(4)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(4)
+        .nr(1)
+        .m(4)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, m_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 4; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, m_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 8; m <= 12; m += 4) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, m_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 5; m < 8; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .output_stride(11)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_PIPELINED, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, k_eq_2) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(4)
+      .nr(1)
+      .m(4)
+      .n(1)
+      .k(2)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, k_lt_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 2; k++) {
+      SpMMMicrokernelTester()
+        .mr(4)
+        .nr(1)
+        .m(4)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, k_gt_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 3; k < 4; k++) {
+      SpMMMicrokernelTester()
+        .mr(4)
+        .nr(1)
+        .m(4)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, k_div_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 4; k <= 20; k += 2) {
+      SpMMMicrokernelTester()
+        .mr(4)
+        .nr(1)
+        .m(4)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, m_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 4; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, m_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 8; m <= 12; m += 4) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, m_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 5; m < 8; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(4)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .output_stride(11)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_4X1__NEON_X2, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(4)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_4x1__neon_x2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
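The blocks above and below are auto-generated coverage for the new NEON SpMM minmax micro-kernels: m, n, and k sweep the tiled dimension, the number of output channels, and the reduction depth around the kernel's mr tile; sparsity(0.5f) / sparsity(1.0f) vary the fraction of zeroed weights; and qmin / qmax exercise the output clamp. As a rough mental model only — the real checks live in SpMMMicrokernelTester, and the names here are illustrative, not XNNPACK API — the expected result is presumably a dense bias-plus-weighted-sum with clamping, along these lines:

// Hypothetical dense reference for the behaviour these tests appear to verify:
// for each of n output channels and m columns, accumulate bias plus the
// weighted sum over k, then clamp to [output_min, output_max].
// All names (spmm_reference, output_stride, ...) are illustrative only.
#include <algorithm>
#include <cstddef>
#include <vector>

void spmm_reference(
    size_t m, size_t n, size_t k,
    const std::vector<float>& input,    // k x m, row-major: input[ki * m + mi]
    const std::vector<float>& weights,  // n x k, row-major: weights[ni * k + ki]
    const std::vector<float>& bias,     // n entries
    std::vector<float>& output,         // n x output_stride
    size_t output_stride,
    float output_min, float output_max) {
  for (size_t ni = 0; ni < n; ni++) {
    for (size_t mi = 0; mi < m; mi++) {
      float acc = bias[ni];
      for (size_t ki = 0; ki < k; ki++) {
        acc += weights[ni * k + ki] * input[ki * m + mi];
      }
      // minmax ("qmin"/"qmax") clamp on the accumulated value
      output[ni * output_stride + mi] =
          std::min(std::max(acc, output_min), output_max);
    }
  }
}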
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_8X1__NEON, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(8)
+      .nr(1)
+      .m(8)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(8)
+        .nr(1)
+        .m(8)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, m_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 8; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, m_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 16; m <= 24; m += 8) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, m_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 9; m < 16; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .output_stride(19)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(8)
+      .nr(1)
+      .m(8)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(8)
+        .nr(1)
+        .m(8)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, m_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 8; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, m_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 16; m <= 24; m += 8) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, m_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 9; m < 16; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .output_stride(19)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_PIPELINED, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, k_eq_2) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(8)
+      .nr(1)
+      .m(8)
+      .n(1)
+      .k(2)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, k_lt_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 2; k++) {
+      SpMMMicrokernelTester()
+        .mr(8)
+        .nr(1)
+        .m(8)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, k_gt_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 3; k < 4; k++) {
+      SpMMMicrokernelTester()
+        .mr(8)
+        .nr(1)
+        .m(8)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, k_div_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 4; k <= 20; k += 2) {
+      SpMMMicrokernelTester()
+        .mr(8)
+        .nr(1)
+        .m(8)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, m_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 8; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, m_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 16; m <= 24; m += 8) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, m_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 9; m < 16; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(8)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .output_stride(19)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_8X1__NEON_X2, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(8)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_8x1__neon_x2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_12X1__NEON, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(12)
+      .nr(1)
+      .m(12)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(12)
+        .nr(1)
+        .m(12)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(12)
+          .nr(1)
+          .m(12)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, m_lt_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 12; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(12)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, m_div_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 24; m <= 36; m += 12) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(12)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, m_gt_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 13; m < 24; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(12)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(12)
+          .nr(1)
+          .m(24)
+          .n(n)
+          .k(k)
+          .output_stride(29)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(12)
+          .nr(1)
+          .m(24)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(12)
+          .nr(1)
+          .m(24)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(12)
+          .nr(1)
+          .m(24)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_12X1__NEON, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(12)
+          .nr(1)
+          .m(24)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_12x1__neon);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_16X1__NEON, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(16)
+      .nr(1)
+      .m(16)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(16)
+        .nr(1)
+        .m(16)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, m_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 16; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, m_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 32; m <= 48; m += 16) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, m_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 17; m < 32; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .output_stride(37)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(16)
+      .nr(1)
+      .m(16)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(16)
+        .nr(1)
+        .m(16)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, m_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 16; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, m_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 32; m <= 48; m += 16) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, m_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 17; m < 32; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .output_stride(37)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_PIPELINED, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, k_eq_2) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(16)
+      .nr(1)
+      .m(16)
+      .n(1)
+      .k(2)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, k_lt_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 2; k++) {
+      SpMMMicrokernelTester()
+        .mr(16)
+        .nr(1)
+        .m(16)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, k_gt_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 3; k < 4; k++) {
+      SpMMMicrokernelTester()
+        .mr(16)
+        .nr(1)
+        .m(16)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, k_div_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 4; k <= 20; k += 2) {
+      SpMMMicrokernelTester()
+        .mr(16)
+        .nr(1)
+        .m(16)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(16)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, m_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 16; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, m_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 32; m <= 48; m += 16) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, m_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 17; m < 32; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(16)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .output_stride(37)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_16X1__NEON_X2, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(16)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_16x1__neon_x2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_32X1__NEON, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(32)
+      .nr(1)
+      .m(32)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(32)
+        .nr(1)
+        .m(32)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, m_lt_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 32; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, m_div_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 64; m <= 96; m += 32) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, m_gt_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 33; m < 64; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .output_stride(67)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, k_eq_1) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(32)
+      .nr(1)
+      .m(32)
+      .n(1)
+      .k(1)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, k_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 2; k < 10; k++) {
+      SpMMMicrokernelTester()
+        .mr(32)
+        .nr(1)
+        .m(32)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, m_lt_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 32; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, m_div_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 64; m <= 96; m += 32) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, m_gt_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 33; m < 64; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 5; k += 2) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .output_stride(67)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_PIPELINED, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 5; k += 2) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, k_eq_2) {
+    TEST_REQUIRES_ARM_NEON;
+    SpMMMicrokernelTester()
+      .mr(32)
+      .nr(1)
+      .m(32)
+      .n(1)
+      .k(2)
+      .sparsity(0.0f)
+      .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, k_lt_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 2; k++) {
+      SpMMMicrokernelTester()
+        .mr(32)
+        .nr(1)
+        .m(32)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, k_gt_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 3; k < 4; k++) {
+      SpMMMicrokernelTester()
+        .mr(32)
+        .nr(1)
+        .m(32)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, k_div_2) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 4; k <= 20; k += 2) {
+      SpMMMicrokernelTester()
+        .mr(32)
+        .nr(1)
+        .m(32)
+        .n(1)
+        .k(k)
+        .sparsity(0.0f)
+        .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, n_gt_1) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 2; n < 10; n++) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(32)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, m_lt_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m < 32; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, m_div_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 64; m <= 96; m += 32) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, m_gt_32) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 33; m < 64; m++) {
+      for (uint32_t n = 1; n < 10; n += 2) {
+        for (size_t k = 1; k <= 10; k += 3) {
+          SpMMMicrokernelTester()
+            .mr(32)
+            .nr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .sparsity(0.0f)
+            .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+        }
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, output_stride) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .output_stride(67)
+          .sparsity(0.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmin(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.0f)
+          .qmax(128)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, half_sparse) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(0.5f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+      }
+    }
+  }
+
+  TEST(F32_SPMM_MINMAX_32X1__NEON_X2, zero_weights) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n < 10; n += 2) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        SpMMMicrokernelTester()
+          .mr(32)
+          .nr(1)
+          .m(64)
+          .n(n)
+          .k(k)
+          .sparsity(1.0f)
+          .Test(xnn_f32_spmm_minmax_ukernel_32x1__neon_x2);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
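Compared to the plain and pipelined variants, the _X2 kernels above carry extra k_eq_2 / k_lt_2 / k_gt_2 / k_div_2 cases; the YAML entries added further down declare them with k-block: 2, so they presumably consume two elements of the reduction per main-loop iteration and fall back to a one-element remainder step. A minimal illustrative sketch of that unroll pattern (not the NEON kernel itself):

// Illustrative unroll-by-2 reduction, mirroring why "k-block: 2" kernels need
// k_lt_2 / k_gt_2 / k_div_2 coverage: a main loop that consumes two elements
// per iteration plus a single-element remainder.
#include <cstddef>

float dot_kblock2(const float* w, const float* x, size_t k) {
  float acc0 = 0.0f;
  float acc1 = 0.0f;
  size_t i = 0;
  for (; i + 2 <= k; i += 2) {  // main loop: two elements per iteration
    acc0 += w[i] * x[i];
    acc1 += w[i + 1] * x[i + 1];
  }
  if (i < k) {                  // remainder: hit by the k_lt_2 / k_gt_2 cases
    acc0 += w[i] * x[i];
  }
  return acc0 + acc1;
}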
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_4X1__NEONFMA, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -196,7 +2674,7 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
 #if XNN_ARCH_ARM64
@@ -653,7 +3131,7 @@
 #endif  // XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_4X1__NEONFMA_PIPELINED, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -832,10 +3310,10 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_4X1__NEONFMA_X2, k_eq_2) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -1042,10 +3520,10 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_8X1__NEONFMA, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -1224,7 +3702,7 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
 #if XNN_ARCH_ARM64
@@ -1681,7 +4159,7 @@
 #endif  // XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_8X1__NEONFMA_PIPELINED, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -1860,10 +4338,10 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_8X1__NEONFMA_X2, k_eq_2) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -2070,10 +4548,10 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_12X1__NEONFMA, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -2252,7 +4730,7 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
 #if XNN_ARCH_ARM64
@@ -2709,7 +5187,7 @@
 #endif  // XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_16X1__NEONFMA, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -2888,7 +5366,7 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
 #if XNN_ARCH_ARM64
@@ -3345,7 +5823,7 @@
 #endif  // XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_16X1__NEONFMA_PIPELINED, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -3524,10 +6002,10 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_16X1__NEONFMA_X2, k_eq_2) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -3734,10 +6212,10 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_32X1__NEONFMA, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -3916,7 +6394,7 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
 #if XNN_ARCH_ARM64
@@ -4373,7 +6851,7 @@
 #endif  // XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_32X1__NEONFMA_PIPELINED, k_eq_1) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -4552,10 +7030,10 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
-#if XNN_ARCH_ARM64
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(F32_SPMM_MINMAX_32X1__NEONFMA_X2, k_eq_2) {
     TEST_REQUIRES_ARM_NEON_FMA;
     SpMMMicrokernelTester()
@@ -4762,7 +7240,7 @@
       }
     }
   }
-#endif  // XNN_ARCH_ARM64
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
 
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
diff --git a/test/f32-spmm-minmax.yaml b/test/f32-spmm-minmax.yaml
index daf91c1..c791267 100644
--- a/test/f32-spmm-minmax.yaml
+++ b/test/f32-spmm-minmax.yaml
@@ -2,10 +2,34 @@
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
+- name: xnn_f32_spmm_minmax_ukernel_4x1__neon
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_4x1__neon_x2
+  k-block: 2
+- name: xnn_f32_spmm_minmax_ukernel_8x1__neon
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_8x1__neon_x2
+  k-block: 2
+- name: xnn_f32_spmm_minmax_ukernel_12x1__neon
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_16x1__neon
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_16x1__neon_x2
+  k-block: 2
+- name: xnn_f32_spmm_minmax_ukernel_32x1__neon
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined
+  k-block: 1
+- name: xnn_f32_spmm_minmax_ukernel_32x1__neon_x2
+  k-block: 2
 - name: xnn_f32_spmm_minmax_ukernel_4x1__neonfma
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_4x2__neonfma
   k-block: 1
   arch:
@@ -16,16 +40,10 @@
   - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_4x1__neonfma_pipelined
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_4x1__neonfma_x2
   k-block: 2
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_8x1__neonfma
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_8x2__neonfma
   k-block: 1
   arch:
@@ -36,16 +54,10 @@
   - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_8x1__neonfma_pipelined
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_8x1__neonfma_x2
   k-block: 2
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_12x1__neonfma
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_12x2__neonfma
   k-block: 1
   arch:
@@ -56,8 +68,6 @@
   - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_16x1__neonfma
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_16x2__neonfma
   k-block: 1
   arch:
@@ -68,16 +78,10 @@
   - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_16x1__neonfma_pipelined
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_16x1__neonfma_x2
   k-block: 2
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_32x1__neonfma
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_32x2__neonfma
   k-block: 1
   arch:
@@ -88,12 +92,8 @@
   - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_32x1__neonfma_pipelined
   k-block: 1
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_32x1__neonfma_x2
   k-block: 2
-  arch:
-  - aarch64
 - name: xnn_f32_spmm_minmax_ukernel_4x1__sse
   k-block: 1
 - name: xnn_f32_spmm_minmax_ukernel_8x1__sse
diff --git a/test/f32-velu.cc b/test/f32-velu.cc
new file mode 100644
index 0000000..4b7cdc7
--- /dev/null
+++ b/test/f32-velu.cc
@@ -0,0 +1,13644 @@
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+//   Specification: test/f32-velu.yaml
+//   Generator: tools/generate-vunary-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/vunary.h>
+#include "vunary-microkernel-tester.h"
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X4, batch_eq_4) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X4, batch_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X4, batch_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X4, batch_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X4, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X4, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X4, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X4, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X8, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X8, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X8, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X8, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X12, batch_eq_12) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X12, batch_div_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X12, batch_lt_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X12, batch_gt_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X12, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X12, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X12, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X12, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X16, batch_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X16, batch_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X16, batch_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X16, batch_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X16, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X16, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X16, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X16, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X20, batch_eq_20) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X20, batch_div_20) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X20, batch_lt_20) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X20, batch_gt_20) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X20, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X20, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X20, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X20, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X24, batch_eq_24) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X24, batch_div_24) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X24, batch_lt_24) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X24, batch_gt_24) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X24, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X24, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X24, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_LUT16_P3_X24, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_P6_X4, batch_eq_4) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X4, batch_div_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X4, batch_lt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X4, batch_gt_4) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X4, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X4, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X4, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X4, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_P6_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X8, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X8, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X8, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X8, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_P6_X12, batch_eq_12) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X12, batch_div_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X12, batch_lt_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X12, batch_gt_12) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X12, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X12, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X12, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X12, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_P6_X16, batch_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X16, batch_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X16, batch_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X16, batch_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X16, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X16, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X16, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X16, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_P6_X20, batch_eq_20) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X20, batch_div_20) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X20, batch_lt_20) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X20, batch_gt_20) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X20, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X20, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X20, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X20, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEON_RR2_P6_X24, batch_eq_24) {
+    TEST_REQUIRES_ARM_NEON;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X24, batch_div_24) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X24, batch_lt_24) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X24, batch_gt_24) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X24, inplace) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X24, prescale) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X24, alpha) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEON_RR2_P6_X24, beta) {
+    TEST_REQUIRES_ARM_NEON;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neon_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X4, batch_eq_4) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X4, batch_div_4) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X4, batch_lt_4) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X4, batch_gt_4) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X4, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X4, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X4, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X4, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X8, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X8, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X8, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X8, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X12, batch_eq_12) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X12, batch_div_12) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X12, batch_lt_12) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X12, batch_gt_12) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X12, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X12, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X12, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X12, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X16, batch_eq_16) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X16, batch_div_16) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X16, batch_lt_16) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X16, batch_gt_16) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X16, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X16, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X16, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X16, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X20, batch_eq_20) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X20, batch_div_20) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X20, batch_lt_20) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X20, batch_gt_20) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X20, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X20, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X20, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X20, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X24, batch_eq_24) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X24, batch_div_24) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X24, batch_lt_24) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X24, batch_gt_24) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X24, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X24, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X24, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_LUT16_P3_X24, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_P6_X4, batch_eq_4) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X4, batch_div_4) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X4, batch_lt_4) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X4, batch_gt_4) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X4, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X4, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X4, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X4, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_P6_X8, batch_eq_8) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X8, batch_div_8) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X8, batch_lt_8) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X8, batch_gt_8) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X8, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X8, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X8, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X8, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_P6_X12, batch_eq_12) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X12, batch_div_12) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X12, batch_lt_12) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X12, batch_gt_12) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X12, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X12, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X12, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X12, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_P6_X16, batch_eq_16) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X16, batch_div_16) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X16, batch_lt_16) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X16, batch_gt_16) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X16, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X16, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X16, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X16, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_P6_X20, batch_eq_20) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X20, batch_div_20) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X20, batch_lt_20) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X20, batch_gt_20) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X20, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X20, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X20, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X20, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(F32_VELU__NEONFMA_RR1_P6_X24, batch_eq_24) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X24, batch_div_24) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X24, batch_lt_24) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X24, batch_gt_24) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X24, inplace) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X24, prescale) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X24, alpha) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__NEONFMA_RR1_P6_X24, beta) {
+    TEST_REQUIRES_ARM_NEON_FMA;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__neonfma_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X4, batch_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X4, batch_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X4, batch_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X4, batch_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X4, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X4, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X4, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X4, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X8, batch_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X8, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X8, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X8, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X8, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X12, batch_eq_12) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X12, batch_div_12) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X12, batch_lt_12) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X12, batch_gt_12) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X12, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X12, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X12, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X12, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X16, batch_div_16) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X16, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X16, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X16, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X16, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X20, batch_eq_20) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X20, batch_div_20) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X20, batch_lt_20) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X20, batch_gt_20) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X20, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X20, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X20, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X20, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X24, batch_div_24) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X24, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X24, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X24, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_LUT16_P3_X24, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_P6_X4, batch_eq_4) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X4, batch_div_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X4, batch_lt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X4, batch_gt_4) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X4, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X4, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X4, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X4, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_P6_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X8, batch_div_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X8, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X8, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X8, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X8, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_P6_X12, batch_eq_12) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X12, batch_div_12) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X12, batch_lt_12) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X12, batch_gt_12) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X12, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X12, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X12, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X12, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_P6_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X16, batch_div_16) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X16, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X16, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X16, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X16, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_P6_X20, batch_eq_20) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X20, batch_div_20) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X20, batch_lt_20) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X20, batch_gt_20) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X20, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X20, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X20, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X20, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE2_RR2_P6_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_SSE2;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X24, batch_div_24) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X24, inplace) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X24, prescale) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X24, alpha) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE2_RR2_P6_X24, beta) {
+    TEST_REQUIRES_X86_SSE2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse2_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X4, batch_eq_4) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X4, batch_div_4) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X4, batch_lt_4) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X4, batch_gt_4) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X4, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X4, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X4, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X4, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X8, batch_div_8) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X8, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X8, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X8, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X8, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X12, batch_eq_12) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X12, batch_div_12) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X12, batch_lt_12) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X12, batch_gt_12) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X12, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X12, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X12, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X12, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X16, batch_div_16) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X16, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X16, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X16, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X16, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X20, batch_eq_20) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X20, batch_div_20) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X20, batch_lt_20) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X20, batch_gt_20) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X20, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X20, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X20, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X20, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X24, batch_div_24) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X24, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X24, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X24, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_LUT16_P3_X24, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_P6_X4, batch_eq_4) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X4, batch_div_4) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X4, batch_lt_4) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X4, batch_gt_4) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X4, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X4, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X4, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X4, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_P6_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X8, batch_div_8) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X8, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X8, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X8, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X8, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_P6_X12, batch_eq_12) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X12, batch_div_12) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X12, batch_lt_12) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X12, batch_gt_12) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X12, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X12, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X12, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X12, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_P6_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X16, batch_div_16) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X16, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X16, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X16, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X16, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_P6_X20, batch_eq_20) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X20, batch_div_20) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X20, batch_lt_20) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X20, batch_gt_20) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X20, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X20, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X20, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X20, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__SSE41_RR2_P6_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_SSE41;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X24, batch_div_24) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X24, inplace) {
+    TEST_REQUIRES_X86_SSE41;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X24, prescale) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X24, alpha) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__SSE41_RR2_P6_X24, beta) {
+    TEST_REQUIRES_X86_SSE41;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__sse41_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X8, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X8, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X8, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X8, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X16, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X16, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X16, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X16, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X24, batch_div_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X24, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X24, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X24, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X24, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X32, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X32, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X32, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X32, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X40, batch_eq_40) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(40)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X40, batch_div_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 80; batch_size < 400; batch_size += 40) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X40, batch_lt_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X40, batch_gt_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 41; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X40, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X40, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X40, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X40, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X48, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X48, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X48, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT4_P4_PERM_X48, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
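+// AVX ELU kernels of the lut16_p3 flavor (a 16-entry LUT with a degree-3
+// polynomial, per the kernel naming scheme), covering batch tiles of 8 through
+// 48 elements.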
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X8, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X8, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X8, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X8, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X16, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X16, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X16, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X16, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X24, batch_div_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X24, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X24, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X24, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X24, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X32, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X32, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X32, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X32, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X40, batch_eq_40) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(40)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X40, batch_div_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 80; batch_size < 400; batch_size += 40) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X40, batch_lt_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X40, batch_gt_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 41; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X40, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X40, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X40, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X40, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X48, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X48, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X48, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_LUT16_P3_X48, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
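+// AVX ELU kernels of the p6 flavor (degree-6 polynomial approximation, per the
+// kernel naming scheme), covering batch tiles of 8 through 48 elements.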
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_P6_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X8, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X8, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X8, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X8, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_P6_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X16, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X16, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X16, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X16, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_P6_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X24, batch_div_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X24, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X24, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X24, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X24, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_P6_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X32, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X32, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X32, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X32, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_P6_X40, batch_eq_40) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(40)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X40, batch_div_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 80; batch_size < 400; batch_size += 40) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X40, batch_lt_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X40, batch_gt_40) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 41; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X40, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X40, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X40, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X40, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX_RR2_P6_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X48, inplace) {
+    TEST_REQUIRES_X86_AVX;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X48, prescale) {
+    TEST_REQUIRES_X86_AVX;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X48, alpha) {
+    TEST_REQUIRES_X86_AVX;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX_RR2_P6_X48, beta) {
+    TEST_REQUIRES_X86_AVX;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx_rr2_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
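+// AVX2 ELU kernels (rr1_lut4_p4_perm flavor), gated on TEST_REQUIRES_X86_AVX2,
+// with batch tiles from 8 elements upward.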
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X8, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X8, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X8, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X8, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X16, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X16, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X16, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X16, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X24, batch_div_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X24, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X24, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X24, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X24, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X32, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X32, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X32, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X32, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X40, batch_eq_40) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(40)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X40, batch_div_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 80; batch_size < 400; batch_size += 40) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X40, batch_lt_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X40, batch_gt_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 41; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X40, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X40, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X40, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X40, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X48, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X48, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X48, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X48, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X56, batch_eq_56) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(56)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X56, batch_div_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 112; batch_size < 560; batch_size += 56) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X56, batch_lt_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 56; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X56, batch_gt_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 57; batch_size < 112; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X56, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X56, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X56, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X56, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X64, batch_eq_64) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(64)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X64, batch_div_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 128; batch_size < 640; batch_size += 64) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X64, batch_lt_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X64, batch_gt_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 65; batch_size < 128; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X64, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X64, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X64, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X64, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X72, batch_eq_72) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(72)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X72, batch_div_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 144; batch_size < 720; batch_size += 72) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X72, batch_lt_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 72; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X72, batch_gt_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 73; batch_size < 144; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X72, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X72, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X72, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X72, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X80, batch_eq_80) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(80)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X80, batch_div_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 160; batch_size < 800; batch_size += 80) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X80, batch_lt_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X80, batch_gt_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 81; batch_size < 160; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X80, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X80, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X80, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT4_P4_PERM_X80, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X8, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X8, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X8, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X8, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X16, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X16, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X16, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X16, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X24, batch_div_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X24, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X24, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X24, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X24, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X32, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X32, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X32, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X32, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X40, batch_eq_40) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(40)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X40, batch_div_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 80; batch_size < 400; batch_size += 40) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X40, batch_lt_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X40, batch_gt_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 41; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X40, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X40, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X40, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X40, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X48, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X48, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X48, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X48, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X56, batch_eq_56) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(56)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X56, batch_div_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 112; batch_size < 560; batch_size += 56) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X56, batch_lt_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 56; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X56, batch_gt_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 57; batch_size < 112; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X56, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X56, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X56, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X56, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X64, batch_eq_64) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(64)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X64, batch_div_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 128; batch_size < 640; batch_size += 64) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X64, batch_lt_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X64, batch_gt_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 65; batch_size < 128; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X64, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X64, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X64, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X64, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X72, batch_eq_72) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(72)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X72, batch_div_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 144; batch_size < 720; batch_size += 72) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X72, batch_lt_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 72; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X72, batch_gt_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 73; batch_size < 144; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X72, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X72, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X72, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X72, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X80, batch_eq_80) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(80)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X80, batch_div_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 160; batch_size < 800; batch_size += 80) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X80, batch_lt_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X80, batch_gt_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 81; batch_size < 160; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X80, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X80, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X80, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT8_P4_PERM_X80, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
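+// The _lut4_p4_perm / _lut8_p4_perm variants above are assumed to keep their
+// small exp2 lookup table in vector registers and index it with in-register
+// permutes, while the _lut16_p3_gather variants below are assumed to hold a
+// 16-entry table in memory and fetch entries with AVX2 gathers; the batch-size
+// and parameter coverage of the generated tests is otherwise identical.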
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X8, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X8, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X8, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X8, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X16, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X16, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X16, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X16, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X24, batch_div_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X24, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X24, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X24, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X24, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X32, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X32, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X32, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X32, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X40, batch_eq_40) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(40)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X40, batch_div_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 80; batch_size < 400; batch_size += 40) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X40, batch_lt_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X40, batch_gt_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 41; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X40, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X40, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X40, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X40, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X48, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X48, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X48, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X48, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X56, batch_eq_56) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(56)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X56, batch_div_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 112; batch_size < 560; batch_size += 56) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X56, batch_lt_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 56; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X56, batch_gt_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 57; batch_size < 112; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X56, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X56, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X56, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X56, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X64, batch_eq_64) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(64)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X64, batch_div_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 128; batch_size < 640; batch_size += 64) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X64, batch_lt_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X64, batch_gt_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 65; batch_size < 128; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X64, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X64, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X64, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X64, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X72, batch_eq_72) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(72)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X72, batch_div_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 144; batch_size < 720; batch_size += 72) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X72, batch_lt_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 72; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X72, batch_gt_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 73; batch_size < 144; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X72, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X72, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X72, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X72, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X80, batch_eq_80) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(80)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X80, batch_div_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 160; batch_size < 800; batch_size += 80) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X80, batch_lt_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X80, batch_gt_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 81; batch_size < 160; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X80, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X80, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X80, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_LUT16_P3_GATHER_X80, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X8, batch_eq_8) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X8, batch_div_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X8, batch_lt_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X8, batch_gt_8) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X8, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X8, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X8, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X8, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X16, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X16, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X16, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X16, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X24, batch_eq_24) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X24, batch_div_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X24, batch_lt_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X24, batch_gt_24) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X24, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X24, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X24, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X24, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X32, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X32, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X32, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X32, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X40, batch_eq_40) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(40)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X40, batch_div_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 80; batch_size < 400; batch_size += 40) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X40, batch_lt_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X40, batch_gt_40) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 41; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X40, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X40, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X40, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X40, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 200; batch_size += 39) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x40), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X48, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X48, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X48, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X48, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X56, batch_eq_56) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(56)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x56), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X56, batch_div_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 112; batch_size < 560; batch_size += 56) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X56, batch_lt_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 56; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X56, batch_gt_56) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 57; batch_size < 112; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X56, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x56), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X56, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X56, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X56, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 280; batch_size += 55) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x56), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X64, batch_eq_64) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(64)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X64, batch_div_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 128; batch_size < 640; batch_size += 64) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X64, batch_lt_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X64, batch_gt_64) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 65; batch_size < 128; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X64, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X64, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X64, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X64, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X72, batch_eq_72) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(72)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x72), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X72, batch_div_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 144; batch_size < 720; batch_size += 72) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X72, batch_lt_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 72; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X72, batch_gt_72) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 73; batch_size < 144; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X72, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x72), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X72, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X72, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X72, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 360; batch_size += 71) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x72), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX2_RR1_P6_X80, batch_eq_80) {
+    TEST_REQUIRES_X86_AVX2;
+    VUnOpMicrokernelTester()
+      .batch_size(80)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X80, batch_div_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 160; batch_size < 800; batch_size += 80) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X80, batch_lt_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X80, batch_gt_80) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 81; batch_size < 160; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X80, inplace) {
+    TEST_REQUIRES_X86_AVX2;
+    for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X80, prescale) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X80, alpha) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX2_RR1_P6_X80, beta) {
+    TEST_REQUIRES_X86_AVX2;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx2_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X16, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X16, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X16, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X16, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X32, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X32, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X32, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X32, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X48, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X48, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X48, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X48, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X64, batch_eq_64) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(64)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X64, batch_div_64) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 128; batch_size < 640; batch_size += 64) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X64, batch_lt_64) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X64, batch_gt_64) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 65; batch_size < 128; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X64, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X64, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X64, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X64, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X80, batch_eq_80) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(80)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X80, batch_div_80) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 160; batch_size < 800; batch_size += 80) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X80, batch_lt_80) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X80, batch_gt_80) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 81; batch_size < 160; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X80, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X80, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X80, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X80, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X96, batch_eq_96) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(96)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X96, batch_div_96) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 192; batch_size < 960; batch_size += 96) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X96, batch_lt_96) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X96, batch_gt_96) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 97; batch_size < 192; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X96, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 480; batch_size += 95) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X96, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 480; batch_size += 95) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X96, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 480; batch_size += 95) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X96, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 480; batch_size += 95) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X112, batch_eq_112) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(112)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X112, batch_div_112) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 224; batch_size < 1120; batch_size += 112) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X112, batch_lt_112) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 112; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X112, batch_gt_112) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 113; batch_size < 224; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X112, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 560; batch_size += 111) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X112, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 560; batch_size += 111) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X112, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 560; batch_size += 111) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X112, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 560; batch_size += 111) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X128, batch_eq_128) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(128)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X128, batch_div_128) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 256; batch_size < 1280; batch_size += 128) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X128, batch_lt_128) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 128; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X128, batch_gt_128) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 129; batch_size < 256; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X128, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 640; batch_size += 127) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X128, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 640; batch_size += 127) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X128, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 640; batch_size += 127) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_LUT16_P3_PERM_X128, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 640; batch_size += 127) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
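+// The remaining AVX-512F groups cover the rr1_p6 (degree-6 polynomial, no
+// lookup table) variants; the test structure is identical to the
+// lut16_p3_perm groups above.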
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_P6_X16, batch_eq_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X16, batch_div_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X16, batch_lt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X16, batch_gt_16) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X16, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X16, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X16, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X16, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_P6_X32, batch_eq_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(32)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X32, batch_div_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X32, batch_lt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X32, batch_gt_32) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X32, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X32, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X32, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X32, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x32), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_P6_X48, batch_eq_48) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(48)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X48, batch_div_48) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 96; batch_size < 480; batch_size += 48) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X48, batch_lt_48) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X48, batch_gt_48) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 49; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X48, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X48, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X48, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X48, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 240; batch_size += 47) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x48), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_P6_X64, batch_eq_64) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(64)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X64, batch_div_64) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 128; batch_size < 640; batch_size += 64) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X64, batch_lt_64) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 64; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X64, batch_gt_64) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 65; batch_size < 128; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X64, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X64, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X64, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X64, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 320; batch_size += 63) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x64), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_P6_X80, batch_eq_80) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(80)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X80, batch_div_80) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 160; batch_size < 800; batch_size += 80) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X80, batch_lt_80) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 80; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X80, batch_gt_80) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 81; batch_size < 160; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X80, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X80, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X80, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X80, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 400; batch_size += 79) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x80), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_P6_X96, batch_eq_96) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(96)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X96, batch_div_96) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 192; batch_size < 960; batch_size += 96) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X96, batch_lt_96) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 96; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X96, batch_gt_96) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 97; batch_size < 192; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X96, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 480; batch_size += 95) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X96, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 480; batch_size += 95) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X96, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 480; batch_size += 95) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X96, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 480; batch_size += 95) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x96), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_P6_X112, batch_eq_112) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(112)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X112, batch_div_112) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 224; batch_size < 1120; batch_size += 112) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X112, batch_lt_112) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 112; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X112, batch_gt_112) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 113; batch_size < 224; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X112, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 560; batch_size += 111) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X112, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 560; batch_size += 111) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X112, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 560; batch_size += 111) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X112, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 560; batch_size += 111) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x112), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F32_VELU__AVX512F_RR1_P6_X128, batch_eq_128) {
+    TEST_REQUIRES_X86_AVX512F;
+    VUnOpMicrokernelTester()
+      .batch_size(128)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X128, batch_div_128) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 256; batch_size < 1280; batch_size += 128) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X128, batch_lt_128) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size < 128; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X128, batch_gt_128) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 129; batch_size < 256; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X128, inplace) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (size_t batch_size = 1; batch_size <= 640; batch_size += 127) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X128, prescale) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 640; batch_size += 127) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X128, alpha) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 640; batch_size += 127) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__AVX512F_RR1_P6_X128, beta) {
+    TEST_REQUIRES_X86_AVX512F;
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 640; batch_size += 127) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__avx512f_rr1_p6_x128), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
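+// Unlike the x86 groups above, the WAsm SIMD groups carry no TEST_REQUIRES_*
+// guard: WAsm SIMD availability is fixed at build time, so the
+// XNN_ARCH_WASMSIMD preprocessor check alone gates these tests. The _arm_
+// infix marks kernel variants with instruction selection tuned for Arm hosts.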
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X4, batch_eq_4) {
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X4, inplace) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X4, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X4, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X4, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X8, batch_eq_8) {
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X8, batch_div_8) {
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X8, batch_lt_8) {
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X8, batch_gt_8) {
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X8, inplace) {
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X8, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X8, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X8, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X12, batch_eq_12) {
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X12, batch_div_12) {
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X12, batch_lt_12) {
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X12, batch_gt_12) {
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X12, inplace) {
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X12, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X12, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X12, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X16, batch_eq_16) {
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X16, batch_div_16) {
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X16, batch_lt_16) {
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X16, batch_gt_16) {
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X16, inplace) {
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X16, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X16, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X16, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X20, batch_eq_20) {
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X20, batch_div_20) {
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X20, batch_lt_20) {
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X20, batch_gt_20) {
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X20, inplace) {
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X20, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X20, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X20, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X24, batch_eq_24) {
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X24, batch_div_24) {
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X24, batch_lt_24) {
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X24, batch_gt_24) {
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X24, inplace) {
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X24, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X24, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_LUT16_P3_X24, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X4, batch_eq_4) {
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X4, inplace) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X4, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X4, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X4, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X8, batch_eq_8) {
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X8, batch_div_8) {
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X8, batch_lt_8) {
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X8, batch_gt_8) {
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X8, inplace) {
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X8, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X8, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X8, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X12, batch_eq_12) {
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X12, batch_div_12) {
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X12, batch_lt_12) {
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X12, batch_gt_12) {
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X12, inplace) {
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X12, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X12, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X12, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X16, batch_eq_16) {
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X16, batch_div_16) {
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X16, batch_lt_16) {
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X16, batch_gt_16) {
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X16, inplace) {
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X16, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X16, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X16, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X20, batch_eq_20) {
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X20, batch_div_20) {
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X20, batch_lt_20) {
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X20, batch_gt_20) {
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X20, inplace) {
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X20, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X20, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X20, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X24, batch_eq_24) {
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X24, batch_div_24) {
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X24, batch_lt_24) {
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X24, batch_gt_24) {
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X24, inplace) {
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X24, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X24, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_LUT16_P3_X24, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
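+
+// The *_RR2_P6_* suites that follow cover the polynomial-only ELU variants
+// (by their names, a degree-6 polynomial approximation instead of the
+// 16-entry LUT + degree-3 polynomial used by the *_RR2_LUT16_P3_* kernels
+// above); the test structure itself is unchanged.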
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X4, batch_eq_4) {
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X4, inplace) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X4, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X4, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X4, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X8, batch_eq_8) {
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X8, batch_div_8) {
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X8, batch_lt_8) {
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X8, batch_gt_8) {
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X8, inplace) {
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X8, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X8, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X8, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X12, batch_eq_12) {
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X12, batch_div_12) {
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X12, batch_lt_12) {
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X12, batch_gt_12) {
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X12, inplace) {
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X12, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X12, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X12, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X16, batch_eq_16) {
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X16, batch_div_16) {
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X16, batch_lt_16) {
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X16, batch_gt_16) {
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X16, inplace) {
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X16, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X16, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X16, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X20, batch_eq_20) {
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X20, batch_div_20) {
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X20, batch_lt_20) {
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X20, batch_gt_20) {
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X20, inplace) {
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X20, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X20, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X20, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X24, batch_eq_24) {
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X24, batch_div_24) {
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X24, batch_lt_24) {
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X24, batch_gt_24) {
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X24, inplace) {
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X24, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X24, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_ARM_RR2_P6_X24, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X4, batch_eq_4) {
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X4, inplace) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X4, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X4, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X4, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X8, batch_eq_8) {
+    VUnOpMicrokernelTester()
+      .batch_size(8)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X8, batch_div_8) {
+    for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X8, batch_lt_8) {
+    for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X8, batch_gt_8) {
+    for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X8, inplace) {
+    for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X8, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X8, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X8, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X12, batch_eq_12) {
+    VUnOpMicrokernelTester()
+      .batch_size(12)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X12, batch_div_12) {
+    for (size_t batch_size = 24; batch_size < 120; batch_size += 12) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X12, batch_lt_12) {
+    for (size_t batch_size = 1; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X12, batch_gt_12) {
+    for (size_t batch_size = 13; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X12, inplace) {
+    for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X12, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X12, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X12, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 60; batch_size += 11) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X16, batch_eq_16) {
+    VUnOpMicrokernelTester()
+      .batch_size(16)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X16, batch_div_16) {
+    for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X16, batch_lt_16) {
+    for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X16, batch_gt_16) {
+    for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X16, inplace) {
+    for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X16, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X16, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X16, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X20, batch_eq_20) {
+    VUnOpMicrokernelTester()
+      .batch_size(20)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X20, batch_div_20) {
+    for (size_t batch_size = 40; batch_size < 200; batch_size += 20) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X20, batch_lt_20) {
+    for (size_t batch_size = 1; batch_size < 20; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X20, batch_gt_20) {
+    for (size_t batch_size = 21; batch_size < 40; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X20, inplace) {
+    for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X20, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X20, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X20, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 100; batch_size += 19) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X24, batch_eq_24) {
+    VUnOpMicrokernelTester()
+      .batch_size(24)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X24, batch_div_24) {
+    for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X24, batch_lt_24) {
+    for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X24, batch_gt_24) {
+    for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X24, inplace) {
+    for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X24, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X24, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASMSIMD_X86_RR2_P6_X24, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X1, batch_eq_1) {
+    VUnOpMicrokernelTester()
+      .batch_size(1)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X1, batch_gt_1) {
+    for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X1, inplace) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X1, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X1, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X1, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X2, batch_eq_2) {
+    VUnOpMicrokernelTester()
+      .batch_size(2)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X2, batch_div_2) {
+    for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X2, batch_lt_2) {
+    for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X2, batch_gt_2) {
+    for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X2, inplace) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X2, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X2, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X2, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X3, batch_eq_3) {
+    VUnOpMicrokernelTester()
+      .batch_size(3)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X3, batch_div_3) {
+    for (size_t batch_size = 6; batch_size < 30; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X3, batch_lt_3) {
+    for (size_t batch_size = 1; batch_size < 3; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X3, batch_gt_3) {
+    for (size_t batch_size = 4; batch_size < 6; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X3, inplace) {
+    for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X3, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X3, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X3, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X4, batch_eq_4) {
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X4, inplace) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X4, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X4, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X4, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X5, batch_eq_5) {
+    VUnOpMicrokernelTester()
+      .batch_size(5)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X5, batch_div_5) {
+    for (size_t batch_size = 10; batch_size < 50; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X5, batch_lt_5) {
+    for (size_t batch_size = 1; batch_size < 5; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X5, batch_gt_5) {
+    for (size_t batch_size = 6; batch_size < 10; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X5, inplace) {
+    for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X5, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X5, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X5, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X6, batch_eq_6) {
+    VUnOpMicrokernelTester()
+      .batch_size(6)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X6, batch_div_6) {
+    for (size_t batch_size = 12; batch_size < 60; batch_size += 6) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X6, batch_lt_6) {
+    for (size_t batch_size = 1; batch_size < 6; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X6, batch_gt_6) {
+    for (size_t batch_size = 7; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X6, inplace) {
+    for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X6, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X6, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_LUT16_P3_X6, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_P6_X1, batch_eq_1) {
+    VUnOpMicrokernelTester()
+      .batch_size(1)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X1, batch_gt_1) {
+    for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X1, inplace) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X1, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X1, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X1, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_P6_X2, batch_eq_2) {
+    VUnOpMicrokernelTester()
+      .batch_size(2)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X2, batch_div_2) {
+    for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X2, batch_lt_2) {
+    for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X2, batch_gt_2) {
+    for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X2, inplace) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X2, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X2, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X2, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_P6_X3, batch_eq_3) {
+    VUnOpMicrokernelTester()
+      .batch_size(3)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X3, batch_div_3) {
+    for (size_t batch_size = 6; batch_size < 30; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X3, batch_lt_3) {
+    for (size_t batch_size = 1; batch_size < 3; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X3, batch_gt_3) {
+    for (size_t batch_size = 4; batch_size < 6; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X3, inplace) {
+    for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X3, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X3, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X3, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_P6_X4, batch_eq_4) {
+    VUnOpMicrokernelTester()
+      .batch_size(4)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X4, batch_div_4) {
+    for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X4, batch_lt_4) {
+    for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X4, batch_gt_4) {
+    for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X4, inplace) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X4, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X4, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X4, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_P6_X5, batch_eq_5) {
+    VUnOpMicrokernelTester()
+      .batch_size(5)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X5, batch_div_5) {
+    for (size_t batch_size = 10; batch_size < 50; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X5, batch_lt_5) {
+    for (size_t batch_size = 1; batch_size < 5; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X5, batch_gt_5) {
+    for (size_t batch_size = 6; batch_size < 10; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X5, inplace) {
+    for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X5, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X5, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X5, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+  TEST(F32_VELU__WASM_RR2_P6_X6, batch_eq_6) {
+    VUnOpMicrokernelTester()
+      .batch_size(6)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU);
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X6, batch_div_6) {
+    for (size_t batch_size = 12; batch_size < 60; batch_size += 6) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X6, batch_lt_6) {
+    for (size_t batch_size = 1; batch_size < 6; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X6, batch_gt_6) {
+    for (size_t batch_size = 7; batch_size < 12; batch_size++) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X6, inplace) {
+    for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .inplace(true)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU);
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X6, prescale) {
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X6, alpha) {
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+
+  TEST(F32_VELU__WASM_RR2_P6_X6, beta) {
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__wasm_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU);
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASM || XNN_ARCH_WASMSIMD
+
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X1, batch_eq_1) {
+  VUnOpMicrokernelTester()
+    .batch_size(1)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X1, batch_gt_1) {
+  for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X1, inplace) {
+  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X1, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X1, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X1, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X2, batch_eq_2) {
+  VUnOpMicrokernelTester()
+    .batch_size(2)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X2, batch_div_2) {
+  for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X2, batch_lt_2) {
+  for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X2, batch_gt_2) {
+  for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X2, inplace) {
+  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X2, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X2, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X2, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X3, batch_eq_3) {
+  VUnOpMicrokernelTester()
+    .batch_size(3)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X3, batch_div_3) {
+  for (size_t batch_size = 6; batch_size < 30; batch_size += 3) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X3, batch_lt_3) {
+  for (size_t batch_size = 1; batch_size < 3; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X3, batch_gt_3) {
+  for (size_t batch_size = 4; batch_size < 6; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X3, inplace) {
+  for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X3, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X3, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X3, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X4, batch_eq_4) {
+  VUnOpMicrokernelTester()
+    .batch_size(4)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X4, batch_div_4) {
+  for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X4, batch_lt_4) {
+  for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X4, batch_gt_4) {
+  for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X4, inplace) {
+  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X4, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X4, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X4, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X5, batch_eq_5) {
+  VUnOpMicrokernelTester()
+    .batch_size(5)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X5, batch_div_5) {
+  for (size_t batch_size = 10; batch_size < 50; batch_size += 5) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X5, batch_lt_5) {
+  for (size_t batch_size = 1; batch_size < 5; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X5, batch_gt_5) {
+  for (size_t batch_size = 6; batch_size < 10; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X5, inplace) {
+  for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X5, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X5, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X5, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X6, batch_eq_6) {
+  VUnOpMicrokernelTester()
+    .batch_size(6)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X6, batch_div_6) {
+  for (size_t batch_size = 12; batch_size < 60; batch_size += 6) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X6, batch_lt_6) {
+  for (size_t batch_size = 1; batch_size < 6; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X6, batch_gt_6) {
+  for (size_t batch_size = 7; batch_size < 12; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X6, inplace) {
+  for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X6, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X6, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_LUT16_P3_X6, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X1, batch_eq_1) {
+  VUnOpMicrokernelTester()
+    .batch_size(1)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X1, batch_gt_1) {
+  for (size_t batch_size = 2; batch_size < 10; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X1, inplace) {
+  for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X1, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X1, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X1, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 5; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x1), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X2, batch_eq_2) {
+  VUnOpMicrokernelTester()
+    .batch_size(2)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X2, batch_div_2) {
+  for (size_t batch_size = 4; batch_size < 20; batch_size += 2) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X2, batch_lt_2) {
+  for (size_t batch_size = 1; batch_size < 2; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X2, batch_gt_2) {
+  for (size_t batch_size = 3; batch_size < 4; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X2, inplace) {
+  for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X2, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X2, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X2, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 10; batch_size += 1) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x2), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X3, batch_eq_3) {
+  VUnOpMicrokernelTester()
+    .batch_size(3)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X3, batch_div_3) {
+  for (size_t batch_size = 6; batch_size < 30; batch_size += 3) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X3, batch_lt_3) {
+  for (size_t batch_size = 1; batch_size < 3; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X3, batch_gt_3) {
+  for (size_t batch_size = 4; batch_size < 6; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X3, inplace) {
+  for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X3, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X3, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X3, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 15; batch_size += 2) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x3), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X4, batch_eq_4) {
+  VUnOpMicrokernelTester()
+    .batch_size(4)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X4, batch_div_4) {
+  for (size_t batch_size = 8; batch_size < 40; batch_size += 4) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X4, batch_lt_4) {
+  for (size_t batch_size = 1; batch_size < 4; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X4, batch_gt_4) {
+  for (size_t batch_size = 5; batch_size < 8; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X4, inplace) {
+  for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X4, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X4, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X4, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 20; batch_size += 3) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x4), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X5, batch_eq_5) {
+  VUnOpMicrokernelTester()
+    .batch_size(5)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X5, batch_div_5) {
+  for (size_t batch_size = 10; batch_size < 50; batch_size += 5) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X5, batch_lt_5) {
+  for (size_t batch_size = 1; batch_size < 5; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X5, batch_gt_5) {
+  for (size_t batch_size = 6; batch_size < 10; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X5, inplace) {
+  for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X5, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X5, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X5, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 25; batch_size += 4) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x5), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X6, batch_eq_6) {
+  VUnOpMicrokernelTester()
+    .batch_size(6)
+    .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X6, batch_div_6) {
+  for (size_t batch_size = 12; batch_size < 60; batch_size += 6) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X6, batch_lt_6) {
+  for (size_t batch_size = 1; batch_size < 6; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X6, batch_gt_6) {
+  for (size_t batch_size = 7; batch_size < 12; batch_size++) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X6, inplace) {
+  for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+    VUnOpMicrokernelTester()
+      .batch_size(batch_size)
+      .inplace(true)
+      .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X6, prescale) {
+  for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+    for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .prescale(prescale)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X6, alpha) {
+  for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .alpha(alpha)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
+
+TEST(F32_VELU__SCALAR_RR2_P6_X6, beta) {
+  for (float beta : std::vector<float>({0.3f, 3.0f})) {
+    for (size_t batch_size = 1; batch_size <= 30; batch_size += 5) {
+      VUnOpMicrokernelTester()
+        .batch_size(batch_size)
+        .beta(beta)
+        .Test(xnn_f32_vunary_ukernel_function(xnn_f32_velu_ukernel__scalar_rr2_p6_x6), VUnOpMicrokernelTester::OpType::ELU, VUnOpMicrokernelTester::Variant::Scalar);
+    }
+  }
+}
\ No newline at end of file
diff --git a/test/f32-velu.yaml b/test/f32-velu.yaml
new file mode 100644
index 0000000..2bb4e00
--- /dev/null
+++ b/test/f32-velu.yaml
@@ -0,0 +1,174 @@
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
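+# List of f32 ELU (VELU) micro-kernel names covered by the unit tests, one entry per micro-kernel.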
+- name: xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4
+- name: xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8
+- name: xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12
+- name: xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16
+- name: xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20
+- name: xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24
+- name: xnn_f32_velu_ukernel__neon_rr2_p6_x4
+- name: xnn_f32_velu_ukernel__neon_rr2_p6_x8
+- name: xnn_f32_velu_ukernel__neon_rr2_p6_x12
+- name: xnn_f32_velu_ukernel__neon_rr2_p6_x16
+- name: xnn_f32_velu_ukernel__neon_rr2_p6_x20
+- name: xnn_f32_velu_ukernel__neon_rr2_p6_x24
+- name: xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4
+- name: xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8
+- name: xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12
+- name: xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16
+- name: xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20
+- name: xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24
+- name: xnn_f32_velu_ukernel__neonfma_rr1_p6_x4
+- name: xnn_f32_velu_ukernel__neonfma_rr1_p6_x8
+- name: xnn_f32_velu_ukernel__neonfma_rr1_p6_x12
+- name: xnn_f32_velu_ukernel__neonfma_rr1_p6_x16
+- name: xnn_f32_velu_ukernel__neonfma_rr1_p6_x20
+- name: xnn_f32_velu_ukernel__neonfma_rr1_p6_x24
+- name: xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x4
+- name: xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x8
+- name: xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x12
+- name: xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x16
+- name: xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x20
+- name: xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_x24
+- name: xnn_f32_velu_ukernel__sse2_rr2_p6_x4
+- name: xnn_f32_velu_ukernel__sse2_rr2_p6_x8
+- name: xnn_f32_velu_ukernel__sse2_rr2_p6_x12
+- name: xnn_f32_velu_ukernel__sse2_rr2_p6_x16
+- name: xnn_f32_velu_ukernel__sse2_rr2_p6_x20
+- name: xnn_f32_velu_ukernel__sse2_rr2_p6_x24
+- name: xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x4
+- name: xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x8
+- name: xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x12
+- name: xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x16
+- name: xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x20
+- name: xnn_f32_velu_ukernel__sse41_rr2_lut16_p3_x24
+- name: xnn_f32_velu_ukernel__sse41_rr2_p6_x4
+- name: xnn_f32_velu_ukernel__sse41_rr2_p6_x8
+- name: xnn_f32_velu_ukernel__sse41_rr2_p6_x12
+- name: xnn_f32_velu_ukernel__sse41_rr2_p6_x16
+- name: xnn_f32_velu_ukernel__sse41_rr2_p6_x20
+- name: xnn_f32_velu_ukernel__sse41_rr2_p6_x24
+- name: xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8
+- name: xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16
+- name: xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24
+- name: xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32
+- name: xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40
+- name: xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48
+- name: xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x8
+- name: xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x16
+- name: xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x24
+- name: xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x32
+- name: xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x40
+- name: xnn_f32_velu_ukernel__avx_rr2_lut16_p3_x48
+- name: xnn_f32_velu_ukernel__avx_rr2_p6_x8
+- name: xnn_f32_velu_ukernel__avx_rr2_p6_x16
+- name: xnn_f32_velu_ukernel__avx_rr2_p6_x24
+- name: xnn_f32_velu_ukernel__avx_rr2_p6_x32
+- name: xnn_f32_velu_ukernel__avx_rr2_p6_x40
+- name: xnn_f32_velu_ukernel__avx_rr2_p6_x48
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72
+- name: xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x8
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x16
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x24
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x32
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x40
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x48
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x56
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x64
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x72
+- name: xnn_f32_velu_ukernel__avx2_rr1_p6_x80
+- name: xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16
+- name: xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32
+- name: xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48
+- name: xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64
+- name: xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80
+- name: xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96
+- name: xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112
+- name: xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128
+- name: xnn_f32_velu_ukernel__avx512f_rr1_p6_x16
+- name: xnn_f32_velu_ukernel__avx512f_rr1_p6_x32
+- name: xnn_f32_velu_ukernel__avx512f_rr1_p6_x48
+- name: xnn_f32_velu_ukernel__avx512f_rr1_p6_x64
+- name: xnn_f32_velu_ukernel__avx512f_rr1_p6_x80
+- name: xnn_f32_velu_ukernel__avx512f_rr1_p6_x96
+- name: xnn_f32_velu_ukernel__avx512f_rr1_p6_x112
+- name: xnn_f32_velu_ukernel__avx512f_rr1_p6_x128
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20
+- name: xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20
+- name: xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24
+- name: xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1
+- name: xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2
+- name: xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3
+- name: xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4
+- name: xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5
+- name: xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6
+- name: xnn_f32_velu_ukernel__wasm_rr2_p6_x1
+- name: xnn_f32_velu_ukernel__wasm_rr2_p6_x2
+- name: xnn_f32_velu_ukernel__wasm_rr2_p6_x3
+- name: xnn_f32_velu_ukernel__wasm_rr2_p6_x4
+- name: xnn_f32_velu_ukernel__wasm_rr2_p6_x5
+- name: xnn_f32_velu_ukernel__wasm_rr2_p6_x6
+- name: xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1
+- name: xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2
+- name: xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3
+- name: xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4
+- name: xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5
+- name: xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6
+- name: xnn_f32_velu_ukernel__scalar_rr2_p6_x1
+- name: xnn_f32_velu_ukernel__scalar_rr2_p6_x2
+- name: xnn_f32_velu_ukernel__scalar_rr2_p6_x3
+- name: xnn_f32_velu_ukernel__scalar_rr2_p6_x4
+- name: xnn_f32_velu_ukernel__scalar_rr2_p6_x5
+- name: xnn_f32_velu_ukernel__scalar_rr2_p6_x6
diff --git a/test/f32-vlrelu.cc b/test/f32-vlrelu.cc
index 72f6c09..20731db 100644
--- a/test/f32-vlrelu.cc
+++ b/test/f32-vlrelu.cc
@@ -1114,6 +1114,7 @@
   }
 }
 
+
 TEST(F32_VLRELU__SCALAR_X2, batch_eq_2) {
   VUnOpMicrokernelTester()
     .batch_size(2)
@@ -1164,6 +1165,7 @@
   }
 }
 
+
 TEST(F32_VLRELU__SCALAR_X4, batch_eq_4) {
   VUnOpMicrokernelTester()
     .batch_size(4)
@@ -1212,4 +1214,4 @@
         .Test(xnn_f32_vunary_ukernel_function(xnn_f32_vlrelu_ukernel__scalar_x4), VUnOpMicrokernelTester::OpType::LeakyReLU, VUnOpMicrokernelTester::Variant::Scalar);
     }
   }
-}
\ No newline at end of file
+}
diff --git a/test/qs8-requantization.cc b/test/qs8-requantization.cc
index 4737544..c6e7c95 100644
--- a/test/qs8-requantization.cc
+++ b/test/qs8-requantization.cc
@@ -391,115 +391,6 @@
 }
 
 
-#if !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
-  /*
-   * Precise PSIMD implementation using unsigned 32-bit arithmetics.
-   */
-
-  TEST(QS8_PRECISE__PSIMD, exact_divide_by_po2) {
-    for (uint32_t s = 1; s < 32; s++) {
-      RequantizationTester()
-        .qmin(std::numeric_limits<int8_t>::min())
-        .qmax(std::numeric_limits<int8_t>::max())
-        .s(s)
-        .TestExactDivideByPO2(xnn_qs8_requantize_precise__psimd);
-    }
-  }
-
-  TEST(QS8_PRECISE__PSIMD, exact_divide_by_po2_with_zero_point) {
-    for (int32_t zero_point = std::numeric_limits<int8_t>::min();
-         zero_point <= std::numeric_limits<int8_t>::max();
-         zero_point++)
-    {
-      for (uint32_t s = 1; s < 32; s++) {
-        RequantizationTester()
-          .zero_point(zero_point)
-          .qmin(std::numeric_limits<int8_t>::min())
-          .qmax(std::numeric_limits<int8_t>::max())
-          .s(s)
-          .TestExactDivideByPO2(xnn_qs8_requantize_precise__psimd);
-      }
-    }
-  }
-
-  TEST(QS8_PRECISE__PSIMD, divide_by_po2_with_rounding_up) {
-    for (int32_t zero_point = std::numeric_limits<int8_t>::min();
-         zero_point <= std::numeric_limits<int8_t>::max();
-         zero_point++)
-    {
-      for (uint32_t s = 1; s < 32; s++) {
-        RequantizationTester()
-          .zero_point(zero_point)
-          .qmin(std::numeric_limits<int8_t>::min())
-          .qmax(std::numeric_limits<int8_t>::max())
-          .s(s)
-          .TestDivideByPO2WithRoundingUp(xnn_qs8_requantize_precise__psimd);
-      }
-    }
-  }
-
-  TEST(QS8_PRECISE__PSIMD, divide_by_po2_with_rounding_down) {
-    for (int32_t zero_point = std::numeric_limits<int8_t>::min();
-         zero_point <= std::numeric_limits<int8_t>::max();
-         zero_point++)
-    {
-      for (uint32_t s = 1; s < 32; s++) {
-        RequantizationTester()
-          .zero_point(zero_point)
-          .qmin(std::numeric_limits<int8_t>::min())
-          .qmax(std::numeric_limits<int8_t>::max())
-          .s(s)
-          .TestDivideByPO2WithRoundingDown(xnn_qs8_requantize_precise__psimd);
-      }
-    }
-  }
-
-  TEST(QS8_PRECISE__PSIMD, divide_by_po2_with_rounding_away) {
-    for (int32_t zero_point = std::numeric_limits<int8_t>::min();
-         zero_point <= std::numeric_limits<int8_t>::max();
-         zero_point++)
-    {
-      for (uint32_t s = 1; s < 32; s++) {
-        RequantizationTester()
-          .zero_point(zero_point)
-          .qmin(std::numeric_limits<int8_t>::min())
-          .qmax(std::numeric_limits<int8_t>::max())
-          .s(s)
-          .TestDivideByPO2WithRoundingAway(xnn_qs8_requantize_precise__psimd);
-      }
-    }
-  }
-
-  TEST(QS8_PRECISE__PSIMD, special_cases) {
-    RequantizationTester()
-      .qmin(std::numeric_limits<int8_t>::min())
-      .qmax(std::numeric_limits<int8_t>::max())
-      .TestSpecialCases(xnn_qs8_requantize_precise__psimd);
-  }
-
-  TEST(QS8_PRECISE__PSIMD, random_cases) {
-    RequantizationTester()
-      .qmin(std::numeric_limits<int8_t>::min())
-      .qmax(std::numeric_limits<int8_t>::max())
-      .iterations(100)
-      .TestRandomCasesPrecise(xnn_qs8_requantize_precise__psimd);
-  }
-
-
-  /*
-   * FP32-based PSIMD implementation using magic trick for FP32->INT32 conversion.
-   */
-
-  TEST(QS8_FP32__PSIMD, random_cases) {
-    RequantizationTester()
-      .qmin(std::numeric_limits<int8_t>::min())
-      .qmax(std::numeric_limits<int8_t>::max())
-      .iterations(1000)
-      .TestRandomCasesApproximate(xnn_qs8_requantize_fp32__psimd);
-  }
-#endif  // !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
-
-
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
   /*
    * Precise SSE2 implementation using floating-point shuffle.
diff --git a/test/qu8-requantization.cc b/test/qu8-requantization.cc
index 57e55c8..6835e11 100644
--- a/test/qu8-requantization.cc
+++ b/test/qu8-requantization.cc
@@ -352,104 +352,6 @@
 }
 
 
-#if !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
-  /*
-   * Precise PSIMD implementation using unsigned 32-bit arithmetics.
-   */
-
-  TEST(QU8_PRECISE__PSIMD, exact_divide_by_po2) {
-    for (uint32_t s = 1; s < 32; s++) {
-      RequantizationTester()
-        .qmin(std::numeric_limits<uint8_t>::min())
-        .qmax(std::numeric_limits<uint8_t>::max())
-        .s(s)
-        .TestExactDivideByPO2(xnn_qu8_requantize_precise__psimd);
-    }
-  }
-
-  TEST(QU8_PRECISE__PSIMD, exact_divide_by_po2_with_zero_point) {
-    for (int32_t zero_point = 1; zero_point < 256; zero_point++) {
-      for (uint32_t s = 1; s < 32; s++) {
-        RequantizationTester()
-          .zero_point(zero_point)
-          .qmin(std::numeric_limits<uint8_t>::min())
-          .qmax(std::numeric_limits<uint8_t>::max())
-          .s(s)
-          .TestExactDivideByPO2(xnn_qu8_requantize_precise__psimd);
-      }
-    }
-  }
-
-  TEST(QU8_PRECISE__PSIMD, divide_by_po2_with_rounding_up) {
-    for (int32_t zero_point = 0; zero_point < 256; zero_point++) {
-      for (uint32_t s = 1; s < 32; s++) {
-        RequantizationTester()
-          .zero_point(zero_point)
-          .qmin(std::numeric_limits<uint8_t>::min())
-          .qmax(std::numeric_limits<uint8_t>::max())
-          .s(s)
-          .TestDivideByPO2WithRoundingUp(xnn_qu8_requantize_precise__psimd);
-      }
-    }
-  }
-
-  TEST(QU8_PRECISE__PSIMD, divide_by_po2_with_rounding_down) {
-    for (int32_t zero_point = 0; zero_point < 256; zero_point++) {
-      for (uint32_t s = 1; s < 32; s++) {
-        RequantizationTester()
-          .zero_point(zero_point)
-          .qmin(std::numeric_limits<uint8_t>::min())
-          .qmax(std::numeric_limits<uint8_t>::max())
-          .s(s)
-          .TestDivideByPO2WithRoundingDown(xnn_qu8_requantize_precise__psimd);
-      }
-    }
-  }
-
-  TEST(QU8_PRECISE__PSIMD, divide_by_po2_with_rounding_away) {
-    for (int32_t zero_point = 0; zero_point < 256; zero_point++) {
-      for (uint32_t s = 1; s < 32; s++) {
-        RequantizationTester()
-          .zero_point(zero_point)
-          .qmin(std::numeric_limits<uint8_t>::min())
-          .qmax(std::numeric_limits<uint8_t>::max())
-          .s(s)
-          .TestDivideByPO2WithRoundingAway(xnn_qu8_requantize_precise__psimd);
-      }
-    }
-  }
-
-  TEST(QU8_PRECISE__PSIMD, special_cases) {
-    RequantizationTester()
-      .qmin(std::numeric_limits<uint8_t>::min())
-      .qmax(std::numeric_limits<uint8_t>::max())
-      .TestSpecialCases(xnn_qu8_requantize_precise__psimd);
-  }
-
-  TEST(QU8_PRECISE__PSIMD, random_cases) {
-    RequantizationTester()
-      .qmin(std::numeric_limits<uint8_t>::min())
-      .qmax(std::numeric_limits<uint8_t>::max())
-      .zero_point(128)
-      .iterations(100)
-      .TestRandomCasesPrecise(xnn_qu8_requantize_precise__psimd);
-  }
-
-
-  /*
-   * FP32-based PSIMD implementation using magic trick for FP32->INT32 conversion.
-   */
-
-  TEST(QU8_FP32__PSIMD, random_cases) {
-    RequantizationTester()
-      .qmin(std::numeric_limits<uint8_t>::min())
-      .qmax(std::numeric_limits<uint8_t>::max())
-      .iterations(1000)
-      .TestRandomCasesApproximate(xnn_qu8_requantize_fp32__psimd);
-  }
-#endif  // !XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC
-
-
 #if XNN_ARCH_X86 || XNN_ARCH_X86_64
   /*
    * Precise SSE2 implementation using floating-point shuffle.
diff --git a/test/subgraph-nchw.cc b/test/subgraph-nchw.cc
index 6db0a1e..01fc8cc 100644
--- a/test/subgraph-nchw.cc
+++ b/test/subgraph-nchw.cc
@@ -8,65 +8,95 @@
 #include "subgraph-tester.h"
 #include <gtest/gtest.h>
 
-TEST(SUBGRAPH_NCHW, one_layer_model) {
-  std::map<uint32_t, std::pair<xnn_layout_type, xnn_layout_type>>
-      expected_layouts = {
-          {0, {xnn_layout_type_nhwc, xnn_layout_type_nhwc}},
-      };
+TEST(SUBGRAPH_NCHW, single_conv) {
+  auto tester = SubgraphTester(4);
+  tester
+    .add_tensor({1, 256, 256, 3}, kDynamic, 0)
+    .add_tensor({32, 3, 3, 3}, kStaticDense, 1)
+    .add_tensor({32}, kStaticDense, 2)
+    .add_tensor({1, 128, 128, 32}, kDynamic, 3)
+    .add_conv(1, 1, 1, 1, 3, 3, 2, 2, 1, 1, 1, 3, 32, 0, 1, 2, 3)
+    .optimize()
+    .rewrite();
 
-  SubgraphTester(4)
-      .add_tensor({1, 256, 256, 3}, kDynamic, 0)
-      .add_tensor({32, 3, 3, 3}, kStatic, 1)
-      .add_tensor({32}, kStatic, 2)
-      .add_tensor({1, 128, 128, 32}, kDynamic, 3)
-      .add_conv(1, 1, 1, 1, 3, 3, 2, 2, 1, 1, 1, 3, 32, 0, 1, 2, 3)
-      .optimize()
-      .rewrite()
-      .CheckLayouts(expected_layouts);
+  ASSERT_EQ(tester.get_layout(0), xnn_layout_type_nhwc);
+  ASSERT_EQ(tester.get_layout(3), xnn_layout_type_nhwc);
 }
 
-TEST(SUBGRAPH_NCHW, two_layers_model) {
-  std::map<uint32_t, std::pair<xnn_layout_type, xnn_layout_type>>
-      expected_layouts = {
-          {0, {xnn_layout_type_nhwc, xnn_layout_type_nchw}},
-          {1, {xnn_layout_type_nchw, xnn_layout_type_nhwc}},
-      };
-  SubgraphTester(5)
-      .add_tensor({1, 256, 256, 3}, kDynamic, 0)
-      .add_tensor({32, 3, 3, 3}, kStatic, 1)
-      .add_tensor({32}, kStatic, 2)
-      .add_tensor({1, 128, 128, 32}, kDynamic, 3)
-      .add_conv(1, 1, 1, 1, 3, 3, 2, 2, 1, 1, 1, 3, 32, 0, 1, 2, 3)
-      .add_tensor({32}, kDynamic, 4)
-      .add_global_average_pooling(3, 4)
-      .optimize()
-      .rewrite()
-      .CheckLayouts(expected_layouts);
+TEST(SUBGRAPH_NCHW, single_conv_and_global_average_pooling) {
+  auto tester = SubgraphTester(5);
+  tester
+    .add_tensor({1, 256, 256, 3}, kDynamic, 0)
+    .add_tensor({32, 3, 3, 3}, kStaticDense, 1)
+    .add_tensor({32}, kStaticDense, 2)
+    .add_tensor({1, 128, 128, 32}, kDynamic, 3)
+    .add_tensor({32}, kDynamic, 4)
+    .add_conv(1, 1, 1, 1, 3, 3, 2, 2, 1, 1, 1, 3, 32, 0, 1, 2, 3)
+    .add_global_average_pooling(3, 4)
+    .optimize()
+    .rewrite();
+
+  ASSERT_EQ(tester.get_layout(0), xnn_layout_type_nhwc);
+  ASSERT_EQ(tester.get_layout(3), xnn_layout_type_nhwc);
+  ASSERT_EQ(tester.get_layout(4), xnn_layout_type_nhwc);
 }
 
-TEST(SUBGRAPH_NCHW, two_layers_with_bottleneck_model) {
-  std::map<uint32_t, std::pair<xnn_layout_type, xnn_layout_type>>
-      expected_layouts = {
-          {0, {xnn_layout_type_nhwc, xnn_layout_type_nchw}},
-          {1, {xnn_layout_type_nchw, xnn_layout_type_nchw}},
-          {2, {xnn_layout_type_nchw, xnn_layout_type_nchw}},
-          {3, {xnn_layout_type_nchw, xnn_layout_type_nhwc}},
-      };
-  SubgraphTester(9)
-      .add_tensor({1, 256, 256, 3}, kDynamic, 0)
-      .add_tensor({32, 3, 3, 3}, kStatic, 1)
-      .add_tensor({32}, kStatic, 2)
-      .add_tensor({1, 128, 128, 32}, kDynamic, 3)
-      .add_conv(1, 1, 1, 1, 3, 3, 2, 2, 1, 1, 1, 3, 32, 0, 1, 2, 3)
-      .add_tensor({1, 3, 3, 2}, kStatic, 4)
-      .add_tensor({32}, kStatic, 5)
-      .add_tensor({1, 128, 128, 32}, kDynamic, 6)
-      .add_depthwise_conv(1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 32, 3, 4, 5, 6)
-      .add_tensor({1, 128, 128, 32}, kDynamic, 7)
-      .add_addition(3, 6, 7)
-      .add_tensor({32}, kDynamic, 8)
-      .add_global_average_pooling(7, 8)
-      .optimize()
-      .rewrite()
-      .CheckLayouts(expected_layouts);
+TEST(SUBGRAPH_NCHW, pixelwise_conv_sandwich) {
+  auto tester = SubgraphTester(8);
+  tester
+    .add_tensor({1, 256, 256, 3}, kDynamic, 0)
+    .add_tensor({8, 3, 3, 3}, kStaticDense, 1)
+    .add_tensor({8}, kStaticDense, 2)
+    .add_tensor({1, 128, 128, 8}, kDynamic, 3)
+    .add_tensor({4, 1, 1, 8}, kStaticSparse, 4)
+    .add_tensor({4}, kStaticDense, 5)
+    .add_tensor({1, 128, 128, 4}, kDynamic, 6)
+    .add_tensor({1, 4}, kDynamic, 7)
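+    // Graph structure: dense 3x3 conv (stride 2) -> sparse 1x1 conv -> global average pooling.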
+    .add_conv(1, 1, 1, 1, 3, 3, 2, 2, 1, 1, 1, 3, 8, 0, 1, 2, 3)
+    .add_conv(0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 8, 4, 3, 4, 5, 6)
+    .add_global_average_pooling(6, 7)
+    .optimize()
+    .rewrite();
+
+  ASSERT_EQ(tester.get_layout(0), xnn_layout_type_nhwc);
+  ASSERT_EQ(tester.get_layout(3), xnn_layout_type_nchw);
+  ASSERT_EQ(tester.get_layout(6), xnn_layout_type_nchw);
+  ASSERT_EQ(tester.get_layout(7), xnn_layout_type_nhwc);
+}
+
+TEST(SUBGRAPH_NCHW, bottleneck) {
+  auto tester = SubgraphTester(15);
+  tester
+    .add_tensor({1, 256, 256, 3}, kDynamic, 0)
+    .add_tensor({8, 3, 3, 3}, kStaticDense, 1)
+    .add_tensor({8}, kStaticDense, 2)
+    .add_tensor({1, 128, 128, 8}, kDynamic, 3)
+    .add_tensor({4, 1, 1, 8}, kStaticSparse, 4)
+    .add_tensor({4}, kStaticDense, 5)
+    .add_tensor({1, 128, 128, 4}, kDynamic, 6)
+    .add_tensor({1, 3, 3, 4}, kStaticDense, 7)
+    .add_tensor({4}, kStaticDense, 8)
+    .add_tensor({1, 128, 128, 4}, kDynamic, 9)
+    .add_tensor({8, 1, 1, 4}, kStaticSparse, 10)
+    .add_tensor({8}, kStaticDense, 11)
+    .add_tensor({1, 128, 128, 8}, kDynamic, 12)
+    .add_tensor({1, 128, 128, 8}, kDynamic, 13)
+    .add_tensor({1, 8}, kDynamic, 14)
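+    // Graph structure: dense 3x3 conv (stride 2) -> sparse 1x1 conv -> 3x3 depthwise conv ->
+    // sparse 1x1 conv -> residual addition -> global average pooling.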
+    .add_conv(1, 1, 1, 1, 3, 3, 2, 2, 1, 1, 1, 3, 8, 0, 1, 2, 3)
+    .add_conv(0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 8, 4, 3, 4, 5, 6)
+    .add_depthwise_conv(1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 4, 6, 7, 8, 9)
+    .add_conv(0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 4, 8, 9, 10, 11, 12)
+    .add_addition(3, 12, 13)
+    .add_global_average_pooling(13, 14)
+    .optimize()
+    .rewrite();
+
+  ASSERT_EQ(tester.get_layout(0), xnn_layout_type_nhwc);
+  ASSERT_EQ(tester.get_layout(3), xnn_layout_type_nchw);
+  ASSERT_EQ(tester.get_layout(6), xnn_layout_type_nchw);
+  ASSERT_EQ(tester.get_layout(9), xnn_layout_type_nchw);
+  ASSERT_EQ(tester.get_layout(12), xnn_layout_type_nchw);
+  ASSERT_EQ(tester.get_layout(13), xnn_layout_type_nchw);
+  ASSERT_EQ(tester.get_layout(14), xnn_layout_type_nhwc);
 }
diff --git a/test/subgraph-tester.h b/test/subgraph-tester.h
index dd212b7..cc4a650 100644
--- a/test/subgraph-tester.h
+++ b/test/subgraph-tester.h
@@ -14,56 +14,59 @@
 #include <cstddef>
 #include <cstdlib>
 #include <functional>
-#include <limits>
 #include <random>
 #include <vector>
 
 #include <gtest/gtest.h>
 
 enum xnn_tensor_type {
-  kStatic = 0,
-  kDynamic = 1,
+  kStaticDense,
+  kStaticSparse,
+  kDynamic,
 };
 
 class SubgraphTester {
  public:
   explicit SubgraphTester(uint32_t external_value_ids) {
-    status_ = xnn_initialize(nullptr);
-    assert(xnn_status_success == status_);
+    xnn_status status = xnn_initialize(nullptr);
+    EXPECT_EQ(status, xnn_status_success);
 
-    const uint32_t flags = 0;
-    status_ = xnn_create_subgraph(external_value_ids, flags, &subgraph_ptr_);
-    assert(xnn_status_success == status_);
+    xnn_subgraph_t subgraph_ptr = nullptr;
+    status = xnn_create_subgraph(external_value_ids, 0 /* flags */, &subgraph_ptr);
+    EXPECT_EQ(status, xnn_status_success);
+    subgraph_.reset(subgraph_ptr);
 
     std::random_device random_device;
     rng_ = std::mt19937(random_device());
   }
 
-  ~SubgraphTester() {
-    status_ = xnn_delete_subgraph(subgraph_ptr_);
-    assert(xnn_status_success == status_);
-  }
-
   inline SubgraphTester& add_tensor(const std::vector<size_t>& dims,
                                     xnn_tensor_type tensor_type,
                                     uint32_t external_id) {
-    const uint32_t flags = 0;
-
-    auto f32rng = std::bind(std::uniform_real_distribution<float>(-1.0f, +1.0f),
-                            std::ref(rng_));
     void* data = nullptr;
-    if (tensor_type == kStatic) {
-      auto num_elements = std::accumulate(std::begin(dims), std::end(dims), 1,
-                                          std::multiplies<>());
-      std::vector<float> weights(num_elements);
-      std::generate(weights.begin(), weights.end(), std::ref(f32rng));
+    if (tensor_type == kStaticDense || tensor_type == kStaticSparse) {
+      const size_t num_elements = std::accumulate(std::begin(dims), std::end(dims), 1, std::multiplies<size_t>());
+      static_data_.emplace_back(num_elements);
+      std::vector<float>& weights = static_data_.back();
+      auto f32rng = std::bind(std::uniform_real_distribution<float>(-1.0f, +1.0f), std::ref(rng_));
+      if (tensor_type == kStaticDense) {
+        std::generate(weights.begin(), weights.end(), std::ref(f32rng));
+      } else {
+        // Create tensor with 90% sparsity in two steps:
+        // 1. Generate non-zero elements in the beginning of the vector
+        // 2. Randomize positions of non-zero elements
+        const size_t num_nonzero_elements = num_elements / 10;
+        std::generate(weights.begin(), weights.begin() + num_nonzero_elements, std::ref(f32rng));
+        std::shuffle(weights.begin(), weights.end(), rng_);
+      }
       data = weights.data();
     }
     uint32_t id_out = 0;
-    status_ =
-        xnn_define_tensor_value(subgraph_ptr_, xnn_datatype_fp32, dims.size(),
-                                dims.data(), data, external_id, flags, &id_out);
-    assert(xnn_status_success == status_);
+    const xnn_status status =
+        xnn_define_tensor_value(subgraph_.get(), xnn_datatype_fp32, dims.size(),
+                                dims.data(), data, external_id, 0 /* flags */, &id_out);
+    EXPECT_EQ(status, xnn_status_success);
+    EXPECT_EQ(id_out, external_id);
 
     return *this;
   }
@@ -76,18 +79,17 @@
       uint32_t dilation_height, uint32_t dilation_width, uint32_t groups,
       size_t group_input_channels, size_t group_output_channels,
       uint32_t input_id, uint32_t filter_id, uint32_t bias_id,
-      uint32_t output_id) {
-    const uint32_t flags = 0;
-
-    status_ = xnn_define_convolution_2d(
-        subgraph_ptr_, input_padding_top, input_padding_right,
+      uint32_t output_id)
+  {
+    const xnn_status status = xnn_define_convolution_2d(
+        subgraph_.get(), input_padding_top, input_padding_right,
         input_padding_bottom, input_padding_left, kernel_height, kernel_width,
         subsampling_height, subsampling_width, dilation_height, dilation_width,
         groups, group_input_channels, group_output_channels,
         -std::numeric_limits<float>::infinity(),
         std::numeric_limits<float>::infinity(), input_id, filter_id, bias_id,
-        output_id, flags);
-    assert(xnn_status_success == status_);
+        output_id, 0 /* flags */);
+    EXPECT_EQ(status, xnn_status_success);
 
     return *this;
   }
@@ -99,83 +101,61 @@
       uint32_t subsampling_height, uint32_t subsampling_width,
       uint32_t dilation_height, uint32_t dilation_width,
       uint32_t depth_multiplier, size_t input_channels, uint32_t input_id,
-      uint32_t filter_id, uint32_t bias_id, uint32_t output_id) {
-    const uint32_t flags = 0;
-
-    status_ = xnn_define_depthwise_convolution_2d(
-        subgraph_ptr_, input_padding_top, input_padding_right,
+      uint32_t filter_id, uint32_t bias_id, uint32_t output_id)
+  {
+    const xnn_status status = xnn_define_depthwise_convolution_2d(
+        subgraph_.get(), input_padding_top, input_padding_right,
         input_padding_bottom, input_padding_left, kernel_height, kernel_width,
         subsampling_height, subsampling_width, dilation_height, dilation_width,
         depth_multiplier, input_channels,
         -std::numeric_limits<float>::infinity(),
         std::numeric_limits<float>::infinity(), input_id, filter_id, bias_id,
-        output_id, flags);
-    assert(xnn_status_success == status_);
+        output_id, 0 /* flags */);
+    EXPECT_EQ(status, xnn_status_success);
 
     return *this;
   }
 
-  inline SubgraphTester& add_addition(uint32_t input_id1, uint32_t input_id2,
-                                      uint32_t output_id) {
-    const uint32_t flags = 0;
-
-    status_ =
-        xnn_define_add2(subgraph_ptr_, -std::numeric_limits<float>::infinity(),
+  inline SubgraphTester& add_addition(uint32_t input_id1, uint32_t input_id2, uint32_t output_id)
+  {
+    const xnn_status status =
+        xnn_define_add2(subgraph_.get(), -std::numeric_limits<float>::infinity(),
                         std::numeric_limits<float>::infinity(), input_id1,
-                        input_id2, output_id, flags);
-    assert(xnn_status_success == status_);
+                        input_id2, output_id, 0 /* flags */);
+    EXPECT_EQ(status, xnn_status_success);
 
     return *this;
   }
 
-  inline SubgraphTester& add_global_average_pooling(uint32_t input_id,
-                                                    uint32_t output_id) {
-    const uint32_t flags = 0;
-
-    status_ = xnn_define_global_average_pooling_2d(
-        subgraph_ptr_, -std::numeric_limits<float>::infinity(),
-        std::numeric_limits<float>::infinity(), input_id, output_id, flags);
-    assert(xnn_status_success == status_);
+  inline SubgraphTester& add_global_average_pooling(uint32_t input_id, uint32_t output_id)
+  {
+    const xnn_status status = xnn_define_global_average_pooling_2d(
+        subgraph_.get(), -std::numeric_limits<float>::infinity(),
+        std::numeric_limits<float>::infinity(), input_id, output_id, 0 /* flags */);
+    EXPECT_EQ(status, xnn_status_success);
 
     return *this;
   }
 
   inline SubgraphTester& optimize() {
-    const uint32_t flags = 0;
-    status_ = xnn_subgraph_optimize(subgraph_ptr_, flags);
-    assert(xnn_status_success == status_);
+    const xnn_status status = xnn_subgraph_optimize(subgraph_.get(), 0 /* flags */);
+    EXPECT_EQ(status, xnn_status_success);
 
     return *this;
   }
 
   inline SubgraphTester& rewrite() {
-    xnn_subgraph_rewrite_for_nchw(subgraph_ptr_);
+    xnn_subgraph_rewrite_for_nchw(subgraph_.get());
 
     return *this;
   }
 
-  void CheckLayouts(
-      std::map<uint32_t, std::pair<xnn_layout_type, xnn_layout_type>>&
-          expected_layouts) const {
-    for (auto const& item : expected_layouts) {
-      xnn_node* node = &subgraph_ptr_->nodes[item.first];
-
-      for (uint32_t i = 0; i < node->num_inputs; i++) {
-        struct xnn_value* value = &subgraph_ptr_->values[node->inputs[i]];
-        if (value->data != nullptr) {
-          continue;
-        }
-        ASSERT_EQ(item.second.first, value->layout);
-      }
-      for (uint32_t i = 0; i < node->num_outputs; i++) {
-        struct xnn_value* value = &subgraph_ptr_->values[node->outputs[i]];
-        ASSERT_EQ(item.second.second, value->layout);
-      }
-    }
+  inline xnn_layout_type get_layout(uint32_t value_id) const {
+    return subgraph_->values[value_id].layout;
   }
 
  private:
-  xnn_subgraph_t subgraph_ptr_ = nullptr;
+  std::vector<std::vector<float>> static_data_;
+  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> subgraph_{nullptr, xnn_delete_subgraph};
   std::mt19937 rng_;
-  xnn_status status_;
 };
diff --git a/test/vunary-microkernel-tester.h b/test/vunary-microkernel-tester.h
index 4d47c16..1e93282 100644
--- a/test/vunary-microkernel-tester.h
+++ b/test/vunary-microkernel-tester.h
@@ -24,6 +24,7 @@
  public:
   enum class OpType {
     Abs,
+    ELU,
     LeakyReLU,
     Negate,
     ReLU,
@@ -69,6 +70,33 @@
     return this->slope_;
   }
 
+  inline VUnOpMicrokernelTester& prescale(float prescale) {
+    this->prescale_ = prescale;
+    return *this;
+  }
+
+  inline float prescale() const {
+    return this->prescale_;
+  }
+
+  inline VUnOpMicrokernelTester& alpha(float alpha) {
+    this->alpha_ = alpha;
+    return *this;
+  }
+
+  inline float alpha() const {
+    return this->alpha_;
+  }
+
+  inline VUnOpMicrokernelTester& beta(float beta) {
+    this->beta_ = beta;
+    return *this;
+  }
+
+  inline float beta() const {
+    return this->beta_;
+  }
+
   inline VUnOpMicrokernelTester& qmin(uint8_t qmin) {
     this->qmin_ = qmin;
     return *this;
@@ -101,6 +129,9 @@
     auto rng = std::mt19937(random_device());
     auto distribution = std::uniform_real_distribution<float>(-125.0f, 125.0f);
     switch (op_type) {
+      case OpType::ELU:
+        distribution = std::uniform_real_distribution<float>(-20.0f, 20.0f);
+        break;
       case OpType::SquareRoot:
         distribution = std::uniform_real_distribution<float>(0.0f, 10.0f);
         break;
@@ -127,6 +158,11 @@
           case OpType::Abs:
             y_ref[i] = std::abs(x_data[i]);
             break;
+          case OpType::ELU:
+            y_ref[i] = std::signbit(x_data[i])
+              ? alpha() * std::expm1(double(x_data[i]) * prescale())
+              : double(x_data[i]) * beta();
+            break;
           case OpType::LeakyReLU:
             y_ref[i] = std::signbit(x_data[i]) ? x_data[i] * slope() : x_data[i];
             break;
@@ -166,6 +202,7 @@
       // Prepare parameters.
       union {
         union xnn_f32_abs_params abs;
+        union xnn_f32_elu_params elu;
         union xnn_f32_relu_params relu;
         union xnn_f32_lrelu_params lrelu;
         union xnn_f32_neg_params neg;
@@ -183,6 +220,16 @@
               break;
           }
           break;
+        case OpType::ELU:
+          switch (variant) {
+            case Variant::Native:
+              params.elu = xnn_init_f32_elu_params(prescale(), alpha(), beta());
+              break;
+            case Variant::Scalar:
+              params.elu = xnn_init_scalar_f32_elu_params(prescale(), alpha(), beta());
+              break;
+          }
+          break;
         case OpType::LeakyReLU:
           switch (variant) {
             case Variant::Native:
@@ -244,10 +291,13 @@
   }
 
  private:
-  size_t batch_size_{1};
-  bool inplace_{false};
-  float slope_{0.5f};
-  uint8_t qmin_{0};
-  uint8_t qmax_{255};
-  size_t iterations_{15};
+  size_t batch_size_ = 1;
+  bool inplace_ = false;
+  float slope_ = 0.5f;
+  float prescale_ = 1.0f;
+  float alpha_ = 1.0f;
+  float beta_ = 1.0f;
+  uint8_t qmin_ = 0;
+  uint8_t qmax_ = 255;
+  size_t iterations_ = 15;
 };
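
The new OpType::ELU reference branch above evaluates alpha * expm1(x * prescale) for negative inputs and beta * x otherwise, in double precision. Restated as a standalone helper for readers skimming the hunk (elu_ref is an illustrative name, not part of the tester):

    #include <cmath>

    // Mirrors the tester's reference loop: negative inputs take the exponential
    // branch scaled by alpha after pre-scaling, non-negative inputs are scaled by beta.
    static double elu_ref(float x, float prescale, float alpha, float beta) {
      return std::signbit(x) ? alpha * std::expm1(double(x) * prescale)
                             : double(x) * beta;
    }
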
diff --git a/third_party/cpuinfo.patch b/third_party/cpuinfo.patch
index 016a785..6b671d2 100644
--- a/third_party/cpuinfo.patch
+++ b/third_party/cpuinfo.patch
@@ -1,130 +1,595 @@
+diff --git CMakeLists.txt CMakeLists.txt
+index 06aee4d..6e42ab9 100644
+--- CMakeLists.txt
++++ CMakeLists.txt
+@@ -1,6 +1,4 @@
+-CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12 FATAL_ERROR)
+-
+-INCLUDE(GNUInstallDirs)
++CMAKE_MINIMUM_REQUIRED(VERSION 3.5 FATAL_ERROR)
+ 
+ # ---[ Project and semantic versioning.
+ PROJECT(cpuinfo C CXX)
+@@ -18,32 +16,22 @@ OPTION(CPUINFO_BUILD_MOCK_TESTS "Build cpuinfo mock tests" ON)
+ OPTION(CPUINFO_BUILD_BENCHMARKS "Build cpuinfo micro-benchmarks" ON)
+ 
+ # ---[ CMake options
++INCLUDE(GNUInstallDirs)
++
+ IF(CPUINFO_BUILD_UNIT_TESTS OR CPUINFO_BUILD_MOCK_TESTS)
+   ENABLE_TESTING()
+ ENDIF()
+ 
+ MACRO(CPUINFO_TARGET_ENABLE_C99 target)
+-  IF(${CMAKE_VERSION} VERSION_LESS "3.1")
+-    IF(NOT MSVC)
+-      TARGET_COMPILE_OPTIONS(${target} PRIVATE -std=c99)
+-    ENDIF()
+-  ELSE()
+-    SET_TARGET_PROPERTIES(${target} PROPERTIES
+-      C_STANDARD 99
+-      C_EXTENSIONS NO)
+-  ENDIF()
++  SET_TARGET_PROPERTIES(${target} PROPERTIES
++    C_STANDARD 99
++    C_EXTENSIONS NO)
+ ENDMACRO()
+ 
+ MACRO(CPUINFO_TARGET_ENABLE_CXX11 target)
+-  IF(${CMAKE_VERSION} VERSION_LESS "3.1")
+-    IF(NOT MSVC)
+-      TARGET_COMPILE_OPTIONS(${target} PRIVATE -std=c++11)
+-    ENDIF()
+-  ELSE()
+-    SET_TARGET_PROPERTIES(${target} PROPERTIES
+-      CXX_STANDARD 11
+-      CXX_EXTENSIONS NO)
+-  ENDIF()
++  SET_TARGET_PROPERTIES(${target} PROPERTIES
++    CXX_STANDARD 11
++    CXX_EXTENSIONS NO)
+ ENDMACRO()
+ 
+ MACRO(CPUINFO_TARGET_RUNTIME_LIBRARY target)
 diff --git include/cpuinfo.h include/cpuinfo.h
-index 6c67c34..85ce174 100644
+index e2e6564..cffa299 100644
 --- include/cpuinfo.h
 +++ include/cpuinfo.h
-@@ -417,6 +417,8 @@ enum cpuinfo_uarch {
- 	cpuinfo_uarch_cortex_a76   = 0x00300376,
- 	/** ARM Cortex-A77. */
- 	cpuinfo_uarch_cortex_a77   = 0x00300377,
-+	/** ARM Cortex-A78. */
-+	cpuinfo_uarch_cortex_a78   = 0x00300378,
+@@ -361,6 +361,8 @@ enum cpuinfo_uarch {
+ 	cpuinfo_uarch_zen         = 0x00200109,
+ 	/** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */
+ 	cpuinfo_uarch_zen2        = 0x0020010A,
++	/** AMD Zen 3 microarchitecture. */
++	cpuinfo_uarch_zen3        = 0x0020010B,
  
- 	/** ARM Neoverse N1. */
- 	cpuinfo_uarch_neoverse_n1  = 0x00300400,
-@@ -1434,6 +1436,7 @@ static inline bool cpuinfo_has_x86_sha(void) {
- 			bool armv6k;
- 			bool armv7;
- 			bool armv7mp;
-+			bool armv8;
- 			bool idiv;
+ 	/** NSC Geode and AMD Geode GX and LX. */
+ 	cpuinfo_uarch_geode  = 0x00200200,
+@@ -425,6 +427,9 @@ enum cpuinfo_uarch {
+ 	/** ARM Neoverse E1. */
+ 	cpuinfo_uarch_neoverse_e1  = 0x00300401,
  
- 			bool vfpv2;
-@@ -1521,6 +1524,16 @@ static inline bool cpuinfo_has_arm_v7mp(void) {
++	/** ARM Cortex-X1. */
++	cpuinfo_uarch_cortex_x1    = 0x00300500,
++
+ 	/** Qualcomm Scorpion. */
+ 	cpuinfo_uarch_scorpion = 0x00400100,
+ 	/** Qualcomm Krait. */
+@@ -1455,6 +1460,8 @@ static inline bool cpuinfo_has_x86_sha(void) {
+ 		#endif
+ 		#if CPUINFO_ARCH_ARM64
+ 			bool atomics;
++			bool sve;
++			bool sve2;
+ 		#endif
+ 		bool rdm;
+ 		bool fp16arith;
+@@ -1770,6 +1777,22 @@ static inline bool cpuinfo_has_arm_crc32(void) {
  	#endif
  }
  
-+static inline bool cpuinfo_has_arm_v8(void) {
++static inline bool cpuinfo_has_arm_sve(void) {
 +	#if CPUINFO_ARCH_ARM64
-+		return true;
-+	#elif CPUINFO_ARCH_ARM
-+		return cpuinfo_isa.armv8;
++		return cpuinfo_isa.sve;
 +	#else
 +		return false;
 +	#endif
 +}
 +
- static inline bool cpuinfo_has_arm_idiv(void) {
- 	#if CPUINFO_ARCH_ARM64
- 		return true;
-@@ -1645,6 +1658,16 @@ static inline bool cpuinfo_has_arm_neon_fma(void) {
- 	#endif
- }
- 
-+static inline bool cpuinfo_has_arm_neon_v8(void) {
++static inline bool cpuinfo_has_arm_sve2(void) {
 +	#if CPUINFO_ARCH_ARM64
-+		return true;
-+	#elif CPUINFO_ARCH_ARM
-+		return cpuinfo_isa.neon && cpuinfo_isa.armv8;
++		return cpuinfo_isa.sve2;
 +	#else
 +		return false;
 +	#endif
 +}
 +
- static inline bool cpuinfo_has_arm_atomics(void) {
- 	#if CPUINFO_ARCH_ARM64
- 		return cpuinfo_isa.atomics;
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
+ const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
 diff --git src/arm/linux/aarch32-isa.c src/arm/linux/aarch32-isa.c
-index 64dd168..41f9972 100644
+index 41f9972..df68aa1 100644
 --- src/arm/linux/aarch32-isa.c
 +++ src/arm/linux/aarch32-isa.c
-@@ -43,6 +43,7 @@ void cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo(
- 		isa->armv6k  = true;
- 		isa->armv7   = true;
- 		isa->armv7mp = true;
-+		isa->armv8   = true;
- 		isa->thumb  = true;
- 		isa->thumb2 = true;
- 		isa->idiv = true;
+@@ -56,24 +56,37 @@ void cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo(
+ 		/*
+ 		 * NEON FP16 compute extension and VQRDMLAH/VQRDMLSH instructions are not indicated in /proc/cpuinfo.
+ 		 * Use a MIDR-based heuristic to whitelist processors known to support it:
+-		 * - Processors with Qualcomm-modified Cortex-A55 cores
+-		 * - Processors with Qualcomm-modified Cortex-A75 cores
+-		 * - Processors with Qualcomm-modified Cortex-A76 cores
+-		 * - Kirin 980 processor
++		 * - Processors with Cortex-A55 cores
++		 * - Processors with Cortex-A65 cores
++		 * - Processors with Cortex-A75 cores
++		 * - Processors with Cortex-A76 cores
++		 * - Processors with Cortex-A77 cores
++		 * - Processors with Exynos M4 cores
++		 * - Processors with Exynos M5 cores
++		 * - Neoverse N1 cores
+ 		 */
+-		switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
+-			case UINT32_C(0x51008020): /* Kryo 385 Gold (Cortex-A75) */
+-			case UINT32_C(0x51008030): /* Kryo 385 Silver (Cortex-A55) */
+-			case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */
+-				isa->fp16arith = true;
+-				isa->rdm = true;
+-				break;
+-			default:
+-				if (chipset->series == cpuinfo_arm_chipset_series_hisilicon_kirin && chipset->model == 980) {
++		if (chipset->series == cpuinfo_arm_chipset_series_samsung_exynos && chipset->model == 9810) {
++			/* Only little cores of Exynos 9810 support FP16 & RDM */
++			cpuinfo_log_warning("FP16 arithmetics and RDM disabled: only little cores in Exynos 9810 support these extensions");
++		} else {
++			switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
++				case UINT32_C(0x4100D050): /* Cortex-A55 */
++				case UINT32_C(0x4100D060): /* Cortex-A65 */
++				case UINT32_C(0x4100D0B0): /* Cortex-A76 */
++				case UINT32_C(0x4100D0C0): /* Neoverse N1 */
++				case UINT32_C(0x4100D0D0): /* Cortex-A77 */
++				case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
++				case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */
++				case UINT32_C(0x51008020): /* Kryo 385 Gold (Cortex-A75) */
++				case UINT32_C(0x51008030): /* Kryo 385 Silver (Cortex-A55) */
++				case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */
++				case UINT32_C(0x51008050): /* Kryo 485 Silver (Cortex-A55) */
++				case UINT32_C(0x53000030): /* Exynos M4 */
++				case UINT32_C(0x53000040): /* Exynos M5 */
+ 					isa->fp16arith = true;
+ 					isa->rdm = true;
+-				}
+-				break;
++					break;
++			}
+ 		}
+ 
+ 		/*
+diff --git src/arm/linux/aarch64-isa.c src/arm/linux/aarch64-isa.c
+index 619cda5..2000e1a 100644
+--- src/arm/linux/aarch64-isa.c
++++ src/arm/linux/aarch64-isa.c
+@@ -6,6 +6,7 @@
+ 
+ void cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
+ 	uint32_t features,
++	uint32_t features2,
+ 	uint32_t midr,
+ 	const struct cpuinfo_arm_chipset chipset[restrict static 1],
+ 	struct cpuinfo_arm_isa isa[restrict static 1])
+@@ -28,43 +29,56 @@ void cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
+ 	if (features & CPUINFO_ARM_LINUX_FEATURE_ATOMICS) {
+ 		isa->atomics = true;
+ 	}
+-	const uint32_t fp16arith_mask = CPUINFO_ARM_LINUX_FEATURE_FPHP | CPUINFO_ARM_LINUX_FEATURE_ASIMDHP;
+-	if ((features & fp16arith_mask) == fp16arith_mask) {
+-		if (chipset->series == cpuinfo_arm_chipset_series_samsung_exynos && chipset->model == 9810) {
+-			/* Exynos 9810 reports that it supports FP16 compute, but in fact only little cores do */
+-			cpuinfo_log_warning("FP16 arithmetics disabled: only little cores of Exynos 9810 support FP16 compute");
+-		} else {
+-			isa->fp16arith = true;
+-		}
+-	} else if (features & CPUINFO_ARM_LINUX_FEATURE_FPHP) {
+-		cpuinfo_log_warning("FP16 arithmetics disabled: detected support only for scalar operations");
+-	} else if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDHP) {
+-		cpuinfo_log_warning("FP16 arithmetics disabled: detected support only for SIMD operations");
+-	}
++
+ 	/*
+-	 * Many phones ship with an old kernel configuration that doesn't report
+-	 * SQRDMLAH/SQRDMLSH/UQRDMLAH/UQRDMLSH instructions.
++	 * Some phones ship with an old kernel configuration that doesn't report NEON FP16 compute extension and SQRDMLAH/SQRDMLSH/UQRDMLAH/UQRDMLSH instructions.
+ 	 * Use a MIDR-based heuristic to whitelist processors known to support it:
+-	 * - Processors with Qualcomm-modified Cortex-A55 cores
+-	 * - Processors with Qualcomm-modified Cortex-A75 cores
+-	 * - Processors with Qualcomm-modified Cortex-A76 cores
+-	 * - Kirin 980 processor
++	 * - Processors with Cortex-A55 cores
++	 * - Processors with Cortex-A65 cores
++	 * - Processors with Cortex-A75 cores
++	 * - Processors with Cortex-A76 cores
++	 * - Processors with Cortex-A77 cores
++	 * - Processors with Exynos M4 cores
++	 * - Processors with Exynos M5 cores
++	 * - Neoverse N1 cores
+ 	 */
+-	switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
+-		case UINT32_C(0x51008020): /* Kryo 385 Gold (Cortex-A75) */
+-		case UINT32_C(0x51008030): /* Kryo 385 Silver (Cortex-A55) */
+-		case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */
+-			isa->rdm = true;
+-			break;
+-		default:
+-			if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDRDM) {
+-				isa->rdm = true;
+-			}
+-			if (chipset->series == cpuinfo_arm_chipset_series_hisilicon_kirin && chipset->model == 980) {
++	if (chipset->series == cpuinfo_arm_chipset_series_samsung_exynos && chipset->model == 9810) {
++		/* Exynos 9810 reports that it supports FP16 compute, but in fact only little cores do */
++		cpuinfo_log_warning("FP16 arithmetics and RDM disabled: only little cores in Exynos 9810 support these extensions");
++	} else {
++		const uint32_t fp16arith_mask = CPUINFO_ARM_LINUX_FEATURE_FPHP | CPUINFO_ARM_LINUX_FEATURE_ASIMDHP;
++		switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
++			case UINT32_C(0x4100D050): /* Cortex-A55 */
++			case UINT32_C(0x4100D060): /* Cortex-A65 */
++			case UINT32_C(0x4100D0B0): /* Cortex-A76 */
++			case UINT32_C(0x4100D0C0): /* Neoverse N1 */
++			case UINT32_C(0x4100D0D0): /* Cortex-A77 */
++			case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
++			case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */
++			case UINT32_C(0x51008020): /* Kryo 385 Gold (Cortex-A75) */
++			case UINT32_C(0x51008030): /* Kryo 385 Silver (Cortex-A55) */
++			case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */
++			case UINT32_C(0x51008050): /* Kryo 485 Silver (Cortex-A55) */
++			case UINT32_C(0x53000030): /* Exynos M4 */
++			case UINT32_C(0x53000040): /* Exynos M5 */
++				isa->fp16arith = true;
+ 				isa->rdm = true;
+-			}
+-			break;
++				break;
++			default:
++				if ((features & fp16arith_mask) == fp16arith_mask) {
++					isa->fp16arith = true;
++				} else if (features & CPUINFO_ARM_LINUX_FEATURE_FPHP) {
++					cpuinfo_log_warning("FP16 arithmetics disabled: detected support only for scalar operations");
++				} else if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDHP) {
++					cpuinfo_log_warning("FP16 arithmetics disabled: detected support only for SIMD operations");
++				}
++				if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDRDM) {
++					isa->rdm = true;
++				}
++				break;
++		}
+ 	}
++
+ 	/*
+ 	 * Many phones ship with an old kernel configuration that doesn't report UDOT/SDOT instructions.
+ 	 * Use a MIDR-based heuristic to whitelist processors known to support it.
+@@ -98,13 +112,16 @@ void cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
+ 	if (features & CPUINFO_ARM_LINUX_FEATURE_JSCVT) {
+ 		isa->jscvt = true;
+ 	}
+-	if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDRDM) {
+-		isa->rdm = true;
+-	}
+ 	if (features & CPUINFO_ARM_LINUX_FEATURE_JSCVT) {
+ 		isa->jscvt = true;
+ 	}
+ 	if (features & CPUINFO_ARM_LINUX_FEATURE_FCMA) {
+ 		isa->fcma = true;
+ 	}
++	if (features & CPUINFO_ARM_LINUX_FEATURE_SVE) {
++		isa->sve = true;
++	}
++	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SVE2) {
++		isa->sve2 = true;
++	}
+ }
+diff --git src/arm/linux/api.h src/arm/linux/api.h
+index 2597e49..1c09f82 100644
+--- src/arm/linux/api.h
++++ src/arm/linux/api.h
+@@ -111,6 +111,28 @@ struct cpuinfo_arm_linux_proc_cpuinfo_cache {
+ 	#define CPUINFO_ARM_LINUX_FEATURE_ILRCPC   UINT32_C(0x04000000)
+ 	#define CPUINFO_ARM_LINUX_FEATURE_FLAGM    UINT32_C(0x08000000)
+ 	#define CPUINFO_ARM_LINUX_FEATURE_SSBS     UINT32_C(0x10000000)
++	#define CPUINFO_ARM_LINUX_FEATURE_SB       UINT32_C(0x20000000)
++	#define CPUINFO_ARM_LINUX_FEATURE_PACA     UINT32_C(0x40000000)
++	#define CPUINFO_ARM_LINUX_FEATURE_PACG     UINT32_C(0x80000000)
++
++	#define CPUINFO_ARM_LINUX_FEATURE2_DCPODP     UINT32_C(0x00000001)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVE2       UINT32_C(0x00000002)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVEAES     UINT32_C(0x00000004)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVEPMULL   UINT32_C(0x00000008)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVEBITPERM UINT32_C(0x00000010)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVESHA3    UINT32_C(0x00000020)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVESM4     UINT32_C(0x00000040)
++	#define CPUINFO_ARM_LINUX_FEATURE2_FLAGM2     UINT32_C(0x00000080)
++	#define CPUINFO_ARM_LINUX_FEATURE2_FRINT      UINT32_C(0x00000100)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVEI8MM    UINT32_C(0x00000200)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVEF32MM   UINT32_C(0x00000400)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVEF64MM   UINT32_C(0x00000800)
++	#define CPUINFO_ARM_LINUX_FEATURE2_SVEBF16    UINT32_C(0x00001000)
++	#define CPUINFO_ARM_LINUX_FEATURE2_I8MM       UINT32_C(0x00002000)
++	#define CPUINFO_ARM_LINUX_FEATURE2_BF16       UINT32_C(0x00004000)
++	#define CPUINFO_ARM_LINUX_FEATURE2_DGH        UINT32_C(0x00008000)
++	#define CPUINFO_ARM_LINUX_FEATURE2_RNG        UINT32_C(0x00010000)
++	#define CPUINFO_ARM_LINUX_FEATURE2_BTI        UINT32_C(0x00020000)
+ #endif
+ 
+ #define CPUINFO_ARM_LINUX_VALID_ARCHITECTURE UINT32_C(0x00010000)
+@@ -146,9 +168,7 @@ struct cpuinfo_arm_linux_processor {
+ 	struct cpuinfo_arm_linux_proc_cpuinfo_cache proc_cpuinfo_cache;
+ #endif
+ 	uint32_t features;
+-#if CPUINFO_ARCH_ARM
+ 	uint32_t features2;
+-#endif
+ 	/**
+ 	 * Main ID Register value.
+ 	 */
+@@ -282,9 +302,13 @@ CPUINFO_INTERNAL bool cpuinfo_arm_linux_parse_proc_cpuinfo(
+ 		const struct cpuinfo_arm_chipset chipset[restrict static 1],
+ 		struct cpuinfo_arm_isa isa[restrict static 1]);
+ #elif CPUINFO_ARCH_ARM64
+-	CPUINFO_INTERNAL uint32_t cpuinfo_arm_linux_hwcap_from_getauxval(void);
++	CPUINFO_INTERNAL void cpuinfo_arm_linux_hwcap_from_getauxval(
++		uint32_t hwcap[restrict static 1],
++		uint32_t hwcap2[restrict static 1]);
++
+ 	CPUINFO_INTERNAL void cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
+ 		uint32_t features,
++		uint32_t features2,
+ 		uint32_t midr,
+ 		const struct cpuinfo_arm_chipset chipset[restrict static 1],
+ 		struct cpuinfo_arm_isa isa[restrict static 1]);
+diff --git src/arm/linux/hwcap.c src/arm/linux/hwcap.c
+index 36d0d91..35e9994 100644
+--- src/arm/linux/hwcap.c
++++ src/arm/linux/hwcap.c
+@@ -29,12 +29,10 @@
+ 		mock_hwcap = hwcap;
+ 	}
+ 
+-	#if CPUINFO_ARCH_ARM
+-		static uint32_t mock_hwcap2 = 0;
+-		void cpuinfo_set_hwcap2(uint32_t hwcap2) {
+-			mock_hwcap2 = hwcap2;
+-		}
+-	#endif
++	static uint32_t mock_hwcap2 = 0;
++	void cpuinfo_set_hwcap2(uint32_t hwcap2) {
++		mock_hwcap2 = hwcap2;
++	}
+ #endif
+ 
+ 
+@@ -145,11 +143,17 @@
+ 		}
+ 	#endif /* __ANDROID__ */
+ #elif CPUINFO_ARCH_ARM64
+-	uint32_t cpuinfo_arm_linux_hwcap_from_getauxval(void) {
++	void cpuinfo_arm_linux_hwcap_from_getauxval(
++		uint32_t hwcap[restrict static 1],
++		uint32_t hwcap2[restrict static 1])
++	{
+ 		#if CPUINFO_MOCK
+-			return mock_hwcap;
++			*hwcap  = mock_hwcap;
++			*hwcap2 = mock_hwcap2;
+ 		#else
+-			return (uint32_t) getauxval(AT_HWCAP);
++			*hwcap  = (uint32_t) getauxval(AT_HWCAP);
++			*hwcap2 = (uint32_t) getauxval(AT_HWCAP2);
++			return ;
+ 		#endif
+ 	}
+ #endif
+diff --git src/arm/linux/init.c src/arm/linux/init.c
+index 89d957e..23d8439 100644
+--- src/arm/linux/init.c
++++ src/arm/linux/init.c
+@@ -277,10 +277,11 @@ void cpuinfo_arm_linux_init(void) {
+ 			last_midr, last_architecture_version, last_architecture_flags,
+ 			&chipset, &cpuinfo_isa);
+ 	#elif CPUINFO_ARCH_ARM64
++		uint32_t isa_features = 0, isa_features2 = 0;
+ 		/* getauxval is always available on ARM64 Android */
+-		const uint32_t isa_features = cpuinfo_arm_linux_hwcap_from_getauxval();
++		cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2);
+ 		cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
+-			isa_features, last_midr, &chipset, &cpuinfo_isa);
++			isa_features, isa_features2, last_midr, &chipset, &cpuinfo_isa);
+ 	#endif
+ 
+ 	/* Detect min/max frequency and package ID */
 diff --git src/arm/mach/init.c src/arm/mach/init.c
-index 058cfc2..e912de6 100644
+index d820744..dbea578 100644
 --- src/arm/mach/init.c
 +++ src/arm/mach/init.c
-@@ -307,6 +307,7 @@ void cpuinfo_arm_mach_init(void) {
- 		case CPU_TYPE_ARM:
- 			switch (cpu_subtype) {
- 				case CPU_SUBTYPE_ARM_V8:
-+					cpuinfo_isa.armv8 = true;
- 					cpuinfo_isa.aes = true;
- 					cpuinfo_isa.sha1 = true;
- 					cpuinfo_isa.sha2 = true;
+@@ -24,7 +24,6 @@
+ #ifndef CPUFAMILY_ARM_LIGHTNING_THUNDER
+ 	#define CPUFAMILY_ARM_LIGHTNING_THUNDER 0x462504D2
+ #endif
+-
+ #ifndef CPUFAMILY_ARM_FIRESTORM_ICESTORM
+ 	#define CPUFAMILY_ARM_FIRESTORM_ICESTORM 0x1B588BB3
+ #endif
+@@ -349,6 +348,7 @@ void cpuinfo_arm_mach_init(void) {
+ 		case CPUFAMILY_ARM_MONSOON_MISTRAL:
+ 		case CPUFAMILY_ARM_VORTEX_TEMPEST:
+ 		case CPUFAMILY_ARM_LIGHTNING_THUNDER:
++		case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
+ 			#if CPUINFO_ARCH_ARM64
+ 				cpuinfo_isa.atomics = true;
+ 			#endif
+@@ -360,8 +360,10 @@ void cpuinfo_arm_mach_init(void) {
+ 	 * ARMv8.2 optional dot-product instructions, so we currently whitelist CPUs
+ 	 * known to support these instruction.
+ 	 */
+-	if (cpu_family == CPUFAMILY_ARM_LIGHTNING_THUNDER) {
+-		cpuinfo_isa.dot = true;
++	switch (cpu_family) {
++		case CPUFAMILY_ARM_LIGHTNING_THUNDER:
++		case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
++			cpuinfo_isa.dot = true;
+ 	}
+ 
+ 	uint32_t num_clusters = 1;
 diff --git src/arm/midr.h src/arm/midr.h
-index 34d7780..2638517 100644
+index 2638517..739dc19 100644
 --- src/arm/midr.h
 +++ src/arm/midr.h
-@@ -183,6 +183,7 @@ inline static uint32_t midr_score_core(uint32_t midr) {
- 		case UINT32_C(0x51008000): /* Kryo 260 / 280 Gold */
- 		case UINT32_C(0x51002050): /* Kryo Gold */
- 		case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */
-+		case UINT32_C(0x4100D410): /* Cortex-A78 */
- 		case UINT32_C(0x4100D0D0): /* Cortex-A77 */
- 		case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
- 		case UINT32_C(0x4100D0B0): /* Cortex-A76 */
+@@ -171,9 +171,10 @@ inline static bool midr_is_kryo_gold(uint32_t midr) {
+ inline static uint32_t midr_score_core(uint32_t midr) {
+ 	const uint32_t core_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
+ 	switch (midr & core_mask) {
+-		case UINT32_C(0x53000040): /* Exynos M5 */
+ 		case UINT32_C(0x53000030): /* Exynos M4 */
+-			/* These cores are in big role w.r.t Cortex-A75 or Cortex-A76 */
++		case UINT32_C(0x53000040): /* Exynos M5 */
++		case UINT32_C(0x4100D440): /* Cortex-X1 */
++			/* These cores are in big role w.r.t Cortex-A75/-A76/-A77/-A78 */
+ 			return 6;
+ 		case UINT32_C(0x4E000030): /* Denver 2 */
+ 		case UINT32_C(0x53000010): /* Exynos M1 and Exynos M2 */
 diff --git src/arm/uarch.c src/arm/uarch.c
-index 55b61df..0d7a7d7 100644
+index 0d7a7d7..8b5362b 100644
 --- src/arm/uarch.c
 +++ src/arm/uarch.c
-@@ -91,6 +91,9 @@ void cpuinfo_arm_decode_vendor_uarch(
- 				case 0xD0E: /* Cortex-A76AE */
- 					*uarch = cpuinfo_uarch_cortex_a76;
+@@ -94,6 +94,9 @@ void cpuinfo_arm_decode_vendor_uarch(
+ 				case 0xD41: /* Cortex-A78 */
+ 					*uarch = cpuinfo_uarch_cortex_a78;
  					break;
-+				case 0xD41: /* Cortex-A78 */
-+					*uarch = cpuinfo_uarch_cortex_a78;
++				case 0xD44: /* Cortex-X1 */
++					*uarch = cpuinfo_uarch_cortex_x1;
 +					break;
  #if CPUINFO_ARCH_ARM64 && !defined(__ANDROID__)
  				case 0xD4A:
  					*uarch = cpuinfo_uarch_neoverse_e1;
+diff --git src/init.c src/init.c
+index f703e8e..d61e7be 100644
+--- src/init.c
++++ src/init.c
+@@ -35,8 +35,6 @@ bool CPUINFO_ABI cpuinfo_initialize(void) {
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ 	#if defined(__linux__)
+ 		pthread_once(&init_guard, &cpuinfo_arm_linux_init);
+-	#elif defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+-		pthread_once(&init_guard, &cpuinfo_arm_mach_init);
+ 	#elif defined(__MACH__) && defined(__APPLE__)
+ 		pthread_once(&init_guard, &cpuinfo_arm_mach_init);
+ 	#else
+diff --git src/x86/uarch.c src/x86/uarch.c
+index ecaa762..3705499 100644
+--- src/x86/uarch.c
++++ src/x86/uarch.c
+@@ -209,9 +209,23 @@ enum cpuinfo_uarch cpuinfo_x86_decode_uarch(
+ 							return cpuinfo_uarch_zen;
+ 						case 0x31: // Rome, Castle Peak
+ 						case 0x60: // Renoir
++						case 0x68: // Lucienne
+ 						case 0x71: // Matisse
++						case 0x90: // Van Gogh
++						case 0x98: // Mero
+ 							return cpuinfo_uarch_zen2;
+ 					}
++					break;
++				case 0x19:
++					switch (model_info->model) {
++						case 0x01: // Genesis
++						case 0x21: // Vermeer
++						case 0x30: // Badami, Trento
++						case 0x40: // Rembrandt
++						case 0x50: // Cezanne
++							return cpuinfo_uarch_zen3;
++					}
++					break;
+ 			}
+ 			break;
+ 		case cpuinfo_vendor_hygon:
+diff --git src/x86/windows/init.c src/x86/windows/init.c
+index 9a23bd7..274075c 100644
+--- src/x86/windows/init.c
++++ src/x86/windows/init.c
+@@ -95,6 +95,15 @@ static void cpuinfo_x86_count_caches(
+ 	*l4_count_ptr  = l4_count;
+ }
+ 
++static bool cpuinfo_x86_windows_is_wine(void) {
++	HMODULE ntdll = GetModuleHandleW(L"ntdll.dll");
++	if (ntdll == NULL) {
++		return false;
++	}
++
++	return GetProcAddress(ntdll, "wine_get_version") != NULL;
++}
++
+ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
+ 	struct cpuinfo_processor* processors = NULL;
+ 	struct cpuinfo_core* cores = NULL;
+@@ -108,6 +117,7 @@ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PV
+ 	PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX processor_infos = NULL;
+ 
+ 	HANDLE heap = GetProcessHeap();
++	const bool is_wine = cpuinfo_x86_windows_is_wine();
+ 
+ 	struct cpuinfo_x86_processor x86_processor;
+ 	ZeroMemory(&x86_processor, sizeof(x86_processor));
+@@ -121,7 +131,8 @@ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PV
+ 		x86_processor.topology.thread_bits_offset + x86_processor.topology.thread_bits_length,
+ 		x86_processor.topology.core_bits_offset + x86_processor.topology.core_bits_length);
+ 
+-	const uint32_t max_group_count = (uint32_t) GetMaximumProcessorGroupCount();
++	/* WINE doesn't implement GetMaximumProcessorGroupCount and aborts when calling it */
++	const uint32_t max_group_count = is_wine ? 1 : (uint32_t) GetMaximumProcessorGroupCount();
+ 	cpuinfo_log_debug("detected %"PRIu32" processor groups", max_group_count);
+ 
+ 	uint32_t processors_count = 0;
+diff --git test/mock/galaxy-s9-us.cc test/mock/galaxy-s9-us.cc
+index ceea969..91c4868 100644
+--- test/mock/galaxy-s9-us.cc
++++ test/mock/galaxy-s9-us.cc
+@@ -817,4 +817,4 @@ int main(int argc, char* argv[]) {
+ 	cpuinfo_initialize();
+ 	::testing::InitGoogleTest(&argc, argv);
+ 	return RUN_ALL_TESTS();
+-}
++}
+\ No newline at end of file
 diff --git tools/cpu-info.c tools/cpu-info.c
-index 2759068..429bbfa 100644
+index 55d654f..30ec633 100644
 --- tools/cpu-info.c
 +++ tools/cpu-info.c
-@@ -183,6 +183,8 @@ static const char* uarch_to_string(enum cpuinfo_uarch uarch) {
- 			return "Cortex-A76";
- 		case cpuinfo_uarch_cortex_a77:
+@@ -129,6 +129,8 @@ static const char* uarch_to_string(enum cpuinfo_uarch uarch) {
+ 			return "Zen";
+ 		case cpuinfo_uarch_zen2:
+ 			return "Zen 2";
++		case cpuinfo_uarch_zen3:
++			return "Zen 3";
+ 		case cpuinfo_uarch_geode:
+ 			return "Geode";
+ 		case cpuinfo_uarch_bobcat:
+@@ -185,6 +187,8 @@ static const char* uarch_to_string(enum cpuinfo_uarch uarch) {
  			return "Cortex-A77";
-+		case cpuinfo_uarch_cortex_a78:
-+			return "Cortex-A78";
+ 		case cpuinfo_uarch_cortex_a78:
+ 			return "Cortex-A78";
++		case cpuinfo_uarch_cortex_x1:
++			return "Cortex-X1";
  		case cpuinfo_uarch_scorpion:
  			return "Scorpion";
  		case cpuinfo_uarch_krait:
 diff --git tools/isa-info.c tools/isa-info.c
-index 98ef919..8365846 100644
+index 8365846..92abb57 100644
 --- tools/isa-info.c
 +++ tools/isa-info.c
-@@ -121,6 +121,7 @@ int main(int argc, char** argv) {
- 		printf("\tARMv6-K: %s\n", cpuinfo_has_arm_v6k() ? "yes" : "no");
- 		printf("\tARMv7: %s\n", cpuinfo_has_arm_v7() ? "yes" : "no");
- 		printf("\tARMv7 MP: %s\n", cpuinfo_has_arm_v7mp() ? "yes" : "no");
-+		printf("\tARMv8: %s\n", cpuinfo_has_arm_v8() ? "yes" : "no");
- 		printf("\tIDIV: %s\n", cpuinfo_has_arm_idiv() ? "yes" : "no");
+@@ -161,6 +161,10 @@ int main(int argc, char** argv) {
+ 		printf("\tARM v8.3 JS conversion: %s\n", cpuinfo_has_arm_jscvt() ? "yes" : "no");
+ 		printf("\tARM v8.3 complex: %s\n", cpuinfo_has_arm_fcma() ? "yes" : "no");
  
- 	printf("Floating-Point support:\n");
++	printf("SIMD extensions:\n");
++		printf("\tARM SVE: %s\n", cpuinfo_has_arm_sve() ? "yes" : "no");
++		printf("\tARM SVE 2: %s\n", cpuinfo_has_arm_sve2() ? "yes" : "no");
++
+ 	printf("Cryptography extensions:\n");
+ 		printf("\tAES: %s\n", cpuinfo_has_arm_aes() ? "yes" : "no");
+ 		printf("\tSHA1: %s\n", cpuinfo_has_arm_sha1() ? "yes" : "no");
diff --git a/third_party/psimd.BUILD b/third_party/psimd.BUILD
deleted file mode 100644
index 1cd0b27..0000000
--- a/third_party/psimd.BUILD
+++ /dev/null
@@ -1,32 +0,0 @@
-# Description:
-#   Portable 128-bit SIMD intrinsics
-
-package(default_visibility = ["//visibility:public"])
-
-licenses(["notice"])
-
-exports_files(["LICENSE"])
-
-cc_library(
-    name = "psimd",
-    hdrs = glob(["include/psimd.h"]),
-    includes = ["include"],
-    defines = select({
-        ":psimd_enable_wasm_qfma_explicit_true": ["PSIMD_ENABLE_WASM_QFMA=1"],
-        ":psimd_enable_wasm_qfma_explicit_false": ["PSIMD_ENABLE_WASM_QFMA=0"],
-        "//conditions:default": ["PSIMD_ENABLE_WASM_QFMA=0"],
-    }),
-    strip_include_prefix = "include",
-)
-
-# Enables usage of QFMA WAsm SIMD instructions.
-config_setting(
-    name = "psimd_enable_wasm_qfma_explicit_true",
-    define_values = {"psimd_enable_wasm_qfma": "true"},
-)
-
-# Disables usage of QFMA WAsm SIMD instructions.
-config_setting(
-    name = "psimd_enable_wasm_qfma_explicit_false",
-    define_values = {"psimd_enable_wasm_qfma": "false"},
-)
diff --git a/tools/generate-argmaxpool-test.py b/tools/generate-argmaxpool-test.py
index 44a5adb..1eef705 100755
--- a/tools/generate-argmaxpool-test.py
+++ b/tools/generate-argmaxpool-test.py
@@ -681,7 +681,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, ukernel_type, _ = ukernel.split("_", 3)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("ArgMaxPoolMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(ARGMAXPOOL_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-avgpool-test.py b/tools/generate-avgpool-test.py
index e62ad73..90a716d 100755
--- a/tools/generate-avgpool-test.py
+++ b/tools/generate-avgpool-test.py
@@ -2413,7 +2413,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, ukernel_type, _ = ukernel.split("_", 3)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("AvgPoolMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(AVGPOOL_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-clamp-test.py b/tools/generate-clamp-test.py
index e82d213..5a88f93 100755
--- a/tools/generate-clamp-test.py
+++ b/tools/generate-clamp-test.py
@@ -133,7 +133,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, _ = ukernel.split("_", 2)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("ClampMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(CLAMP_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-dwconv-test.py b/tools/generate-dwconv-test.py
index 4f8c337..edbe471 100755
--- a/tools/generate-dwconv-test.py
+++ b/tools/generate-dwconv-test.py
@@ -306,7 +306,7 @@
   if activation == "ukernel":
     activation = "linear"
   test_args = [ukernel]
-  if activation != "linear" and (not isa or isa == "psimd"):
+  if activation != "linear" and not isa:
     test_args.append("DWConvMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(DWCONV_TEST_CODE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-dwconv2d-chw-test.py b/tools/generate-dwconv2d-chw-test.py
index 0c215da..0b79784 100755
--- a/tools/generate-dwconv2d-chw-test.py
+++ b/tools/generate-dwconv2d-chw-test.py
@@ -269,7 +269,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, ukernel_type, _ = ukernel.split("_", 3)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("DWConv2DMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-gavgpool-test.py b/tools/generate-gavgpool-test.py
index 2b93445..599dff2 100755
--- a/tools/generate-gavgpool-test.py
+++ b/tools/generate-gavgpool-test.py
@@ -527,7 +527,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, ukernel_type, _ = ukernel.split("_", 3)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("GAvgPoolMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(AVGPOOL_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-gemm-test.py b/tools/generate-gemm-test.py
index d64f5b9..0d6b28a 100755
--- a/tools/generate-gemm-test.py
+++ b/tools/generate-gemm-test.py
@@ -785,7 +785,7 @@
   if activation == "ukernel":
     activation = "linear"
   test_args = [ukernel]
-  if activation not in ["linear", "relu"] and (not isa or isa == "psimd"):
+  if activation not in ["linear", "relu"] and not isa:
     test_args.append("GemmMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(GEMM_TEST_CODE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-hswish-test.py b/tools/generate-hswish-test.py
index 2213dce..9c3a9ab 100755
--- a/tools/generate-hswish-test.py
+++ b/tools/generate-hswish-test.py
@@ -105,7 +105,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, _ = ukernel.split("_", 2)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("HSwishMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(HSWISH_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-maxpool-test.py b/tools/generate-maxpool-test.py
index ab37635..8a15c62 100755
--- a/tools/generate-maxpool-test.py
+++ b/tools/generate-maxpool-test.py
@@ -1020,7 +1020,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, ukernel_type, _ = ukernel.split("_", 3)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("MaxPoolMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(MAXPOOL_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-spmm-test.py b/tools/generate-spmm-test.py
index ae39200..884594e 100755
--- a/tools/generate-spmm-test.py
+++ b/tools/generate-spmm-test.py
@@ -398,7 +398,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, ukernel_type, _ = ukernel.split("_", 3)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("SpMMMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-vbinary-test.py b/tools/generate-vbinary-test.py
index dd6526d..2041805 100755
--- a/tools/generate-vbinary-test.py
+++ b/tools/generate-vbinary-test.py
@@ -275,7 +275,7 @@
   test_args = [ukernel]
   if tester in ["VBinOpMicrokernelTester", "VBinOpCMicrokernelTester"]:
     test_args.append("%s::OpType::%s" % (tester, op_type))
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("%s::Variant::Scalar" % tester)
   return xngen.preprocess(BINOP_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-vmulcaddc-test.py b/tools/generate-vmulcaddc-test.py
index 71f893b..ade052d 100755
--- a/tools/generate-vmulcaddc-test.py
+++ b/tools/generate-vmulcaddc-test.py
@@ -223,7 +223,7 @@
   _, test_name = ukernel.split("_", 1)
   _, datatype, ukernel_type, _ = ukernel.split("_", 3)
   test_args = [ukernel]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("VMulCAddCMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(VMULCADDC_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/generate-vunary-test.py b/tools/generate-vunary-test.py
index 44a9781..23d6898 100755
--- a/tools/generate-vunary-test.py
+++ b/tools/generate-vunary-test.py
@@ -27,12 +27,13 @@
 
 
 def split_ukernel_name(name):
-  match = re.match(r"^xnn_(f16|f32)_(relu|sigmoid|vabs|vlrelu|vneg|vsqr|vrndne|vrndz|vrndd|vrndu|vsqrt)_(fact_)?ukernel__(.+)_x(\d+)$", name)
+  match = re.match(r"^xnn_(f16|f32)_(relu|sigmoid|vabs|velu|vlrelu|vneg|vsqr|vrndne|vrndz|vrndd|vrndu|vsqrt)_(fact_)?ukernel__(.+)_x(\d+)$", name)
   if match is None:
     raise ValueError("Unexpected microkernel name: " + name)
   op_type = {
     "relu": "ReLU",
     "vabs": "Abs",
+    "velu": "ELU",
     "vlrelu": "LeakyReLU",
     "vneg": "Negate",
     "sigmoid": "Sigmoid",
@@ -113,6 +114,46 @@
       }
     }
   }
+
+$if OP_TYPE == "ELU":
+  TEST(${TEST_NAME}, prescale) {
+    $if ISA_CHECK:
+      ${ISA_CHECK};
+    for (float prescale : std::vector<float>({0.1f, 10.0f})) {
+      for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .prescale(prescale)
+          .Test(${", ".join(TEST_ARGS)});
+      }
+    }
+  }
+
+  TEST(${TEST_NAME}, alpha) {
+    $if ISA_CHECK:
+      ${ISA_CHECK};
+    for (float alpha : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .alpha(alpha)
+          .Test(${", ".join(TEST_ARGS)});
+      }
+    }
+  }
+
+  TEST(${TEST_NAME}, beta) {
+    $if ISA_CHECK:
+      ${ISA_CHECK};
+    for (float beta : std::vector<float>({0.3f, 3.0f})) {
+      for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
+        VUnOpMicrokernelTester()
+          .batch_size(batch_size)
+          .beta(beta)
+          .Test(${", ".join(TEST_ARGS)});
+      }
+    }
+  }
 """
 
 
@@ -136,7 +177,7 @@
     "xnn_f32_vunary_ukernel_function(%s)" % ukernel,
     "VUnOpMicrokernelTester::OpType::%s" % op_type,
   ]
-  if not isa or isa == "psimd":
+  if not isa:
     test_args.append("VUnOpMicrokernelTester::Variant::Scalar")
   return xngen.preprocess(BINOP_TEST_TEMPLATE, {
       "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
diff --git a/tools/xnncommon.py b/tools/xnncommon.py
index 52f26a4..398612f 100644
--- a/tools/xnncommon.py
+++ b/tools/xnncommon.py
@@ -48,7 +48,6 @@
   "wasm32": ["wasm", "wasmsimd"],
   "wasm": ["wasm", "wasmsimd"],
   "wasmsimd": ["wasmsimd"],
-  "psimd": [],
 }
 
 _ISA_TO_CHECK_MAP = {
@@ -67,7 +66,6 @@
   "fma3": "TEST_REQUIRES_X86_FMA3",
   "avx512f": "TEST_REQUIRES_X86_AVX512F",
   "avx512skx": "TEST_REQUIRES_X86_AVX512SKX",
-  "psimd": "TEST_REQUIRES_PSIMD",
 }
 
 
@@ -99,13 +97,7 @@
     guard = " || ".join(map(_ARCH_TO_MACRO_MAP.get, arch))
     if assembly:
       guard += " && XNN_ENABLE_ASSEMBLY"
-    return "#if %s\n" % guard + \
-      _indent(test_case) + "\n" + \
-      "#endif  // %s\n" % guard
-  elif isa == "psimd":
-    guard = "!XNN_ARCH_WASM && !XNN_COMPILER_MSVC && !XNN_COMPILER_ICC"
-    return "#if %s\n" % guard + \
-      _indent(test_case) + "\n" + \
+    return "#if %s\n" % guard + _indent(test_case) + "\n" + \
       "#endif  // %s\n" % guard
   else:
     return test_case